arm64: Use __pa_symbol for kernel symbols
author    Laura Abbott <labbott@redhat.com>
          Tue, 10 Jan 2017 21:35:49 +0000 (13:35 -0800)
committer Will Deacon <will.deacon@arm.com>
          Thu, 12 Jan 2017 15:05:39 +0000 (15:05 +0000)
__pa_symbol is technically the macro that should be used for kernel
symbols. Switch to this as a prerequisite for DEBUG_VIRTUAL, which
will do bounds checking.
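
For background: on arm64 the kernel image is mapped separately from the
linear map, so the virtual address of a kernel symbol is not necessarily
an address that __virt_to_phys()/__pa() may be handed once bounds
checking is enabled. A rough sketch of the helpers involved (illustrative
only; the generic fallbacks live in include/linux/mm.h and the arm64
definitions differ in detail):

	/*
	 * Illustrative sketch, not the exact kernel definitions:
	 * __pa_symbol() translates the address of a kernel-image
	 * symbol, while __virt_to_phys()/__pa() expect linear-map
	 * addresses. With DEBUG_VIRTUAL each variant can then verify
	 * it was handed the right kind of virtual address.
	 */
	#define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))

	/* lm_alias() returns a kernel symbol's linear-map alias: */
	#define lm_alias(x)	__va(__pa_symbol(x))

The lm_alias() and generic __pa_symbol() fallbacks are what the new
#include <linux/mm.h> lines below pull in.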

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
16 files changed:
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/pgtable.h
arch/arm64/kernel/acpi_parking_protocol.c
arch/arm64/kernel/cpu-reset.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/insn.c
arch/arm64/kernel/psci.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kernel/vdso.c
arch/arm64/mm/init.c
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmu.c

index 6f72fe8b0e3ee477a076fa5f9ef02d13c3d8eef3..55772c13a375edf6fcb2ec15fc39c5ac2ec077b4 100644 (file)
@@ -47,7 +47,7 @@
  * If the page is in the bottom half, we have to use the top half. If
  * the page is in the top half, we have to use the bottom half:
  *
- * T = __virt_to_phys(__hyp_idmap_text_start)
+ * T = __pa_symbol(__hyp_idmap_text_start)
  * if (T & BIT(VA_BITS - 1))
  *     HYP_VA_MIN = 0  //idmap in upper half
  * else
@@ -271,7 +271,7 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
        kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
 }
 
-#define kvm_virt_to_phys(x)            __virt_to_phys((unsigned long)(x))
+#define kvm_virt_to_phys(x)            __pa_symbol(x)
 
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
index cd6e3eec0efefde2bf2536696bab42a5702056b1..0ff237a75c0569dda68813be965aee1efc8ec601 100644 (file)
@@ -210,6 +210,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 #define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
+#define sym_to_pfn(x)      __phys_to_pfn(__pa_symbol(x))
 
 /*
  *  virt_to_page(k)    convert a _valid_ virtual address to struct page *
index 0363fe80455ce3b0c306b34aa8671d846bcc95bb..63e9982daca19447b90c61655870b6c368ad2819 100644 (file)
@@ -45,7 +45,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-       unsigned long ttbr = virt_to_phys(empty_zero_page);
+       unsigned long ttbr = __pa_symbol(empty_zero_page);
 
        write_sysreg(ttbr, ttbr0_el1);
        isb();
@@ -114,7 +114,7 @@ static inline void cpu_install_idmap(void)
        local_flush_tlb_all();
        cpu_set_idmap_tcr_t0sz();
 
-       cpu_switch_mm(idmap_pg_dir, &init_mm);
+       cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
 }
 
 /*
@@ -129,7 +129,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgd)
 
        phys_addr_t pgd_phys = virt_to_phys(pgd);
 
-       replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
+       replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
        cpu_install_idmap();
        replace_phys(pgd_phys);
index ffbb9a52056314225bfe5e119830d3a0170b8ad5..090134c346dbcc7a1d9f04e091bd6f640733cdfd 100644 (file)
@@ -52,7 +52,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  * for zero-mapped memory areas etc..
  */
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr)       pfn_to_page(PHYS_PFN(__pa(empty_zero_page)))
+#define ZERO_PAGE(vaddr)       phys_to_page(__pa_symbol(empty_zero_page))
 
 #define pte_ERROR(pte)         __pte_error(__FILE__, __LINE__, pte_val(pte))
 
index a32b4011d711c42d033c7675a87686bfa33d867c..1f5655cd9cc93280b92417370be49c808fe3bb1d 100644 (file)
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/acpi.h>
+#include <linux/mm.h>
 #include <linux/types.h>
 
 #include <asm/cpu_ops.h>
@@ -109,7 +110,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
         * that read this address need to convert this address to the
         * Boot-Loader's endianness before jumping.
         */
-       writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
+       writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point);
        writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
 
        arch_send_wakeup_ipi_mask(cpumask_of(cpu));
index d4e9ecb264f00d6f37bf2ecdb4436fd1a88fed0c..6c2b1b4f57c9a457a75209428647061273014dc1 100644 (file)
@@ -24,7 +24,7 @@ static inline void __noreturn cpu_soft_restart(unsigned long el2_switch,
 
        el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
                is_hyp_mode_available();
-       restart = (void *)virt_to_phys(__cpu_soft_restart);
+       restart = (void *)__pa_symbol(__cpu_soft_restart);
 
        cpu_install_idmap();
        restart(el2_switch, entry, arg0, arg1, arg2);
index d249f43910781cfd49d8191525162c733920ee50..1661750dabfb41283ee8546e415456e8ef924722 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/sort.h>
 #include <linux/stop_machine.h>
 #include <linux/types.h>
+#include <linux/mm.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
@@ -746,7 +747,7 @@ static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused
 static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
                           int __unused)
 {
-       phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);
+       phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 
        /*
         * Activate the lower HYP offset only if:
index 1b918aaed5389940dd83077510162f47a88a9ed8..97a7384100f392237137eb2bd8ca14252317e45c 100644 (file)
@@ -50,9 +50,6 @@
  */
 extern int in_suspend;
 
-/* Find a symbols alias in the linear map */
-#define LMADDR(x)      phys_to_virt(virt_to_phys(x))
-
 /* Do we need to reset el2? */
 #define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
 
@@ -102,8 +99,8 @@ static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
 
 int pfn_is_nosave(unsigned long pfn)
 {
-       unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
-       unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
+       unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
+       unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
 
        return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
 }
@@ -125,12 +122,12 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
                return -EOVERFLOW;
 
        arch_hdr_invariants(&hdr->invariants);
-       hdr->ttbr1_el1          = virt_to_phys(swapper_pg_dir);
+       hdr->ttbr1_el1          = __pa_symbol(swapper_pg_dir);
        hdr->reenter_kernel     = _cpu_resume;
 
        /* We can't use __hyp_get_vectors() because kvm may still be loaded */
        if (el2_reset_needed())
-               hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
+               hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
        else
                hdr->__hyp_stub_vectors = 0;
 
@@ -460,7 +457,6 @@ int swsusp_arch_resume(void)
        void *zero_page;
        size_t exit_size;
        pgd_t *tmp_pg_dir;
-       void *lm_restore_pblist;
        phys_addr_t phys_hibernate_exit;
        void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
                                          void *, phys_addr_t, phys_addr_t);
@@ -480,12 +476,6 @@ int swsusp_arch_resume(void)
        if (rc)
                goto out;
 
-       /*
-        * Since we only copied the linear map, we need to find restore_pblist's
-        * linear map address.
-        */
-       lm_restore_pblist = LMADDR(restore_pblist);
-
        /*
         * We need a zero page that is zero before & after resume in order to
         * to break before make on the ttbr1 page tables.
@@ -537,7 +527,7 @@ int swsusp_arch_resume(void)
        }
 
        hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
-                      resume_hdr.reenter_kernel, lm_restore_pblist,
+                      resume_hdr.reenter_kernel, restore_pblist,
                       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
 
 out:
index 1f44cf8c73e68679019de7a409244583b98c71ed..022d4a9d1738789cd25090b090a0af642181db18 100644 (file)
@@ -96,7 +96,7 @@ static void __kprobes *patch_map(void *addr, int fixmap)
        if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
                page = vmalloc_to_page(addr);
        else if (!module)
-               page = pfn_to_page(PHYS_PFN(__pa(addr)));
+               page = phys_to_page(__pa_symbol(addr));
        else
                return addr;
 
index 42816bebb1e0f732d788780fde431028f202c31a..e8edbf13302aad06875703c5b680dd3513c4bb91 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/smp.h>
 #include <linux/delay.h>
 #include <linux/psci.h>
+#include <linux/mm.h>
 
 #include <uapi/linux/psci.h>
 
@@ -45,7 +46,7 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
 
 static int cpu_psci_cpu_boot(unsigned int cpu)
 {
-       int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+       int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
        if (err)
                pr_err("failed to boot CPU%d (%d)\n", cpu, err);
 
index b051367e21491cada86f115f4386fda8aace299a..669fc9ff728b227f991ef61462a9607d1027bafd 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/of_fdt.h>
 #include <linux/efi.h>
 #include <linux/psci.h>
+#include <linux/mm.h>
 
 #include <asm/acpi.h>
 #include <asm/fixmap.h>
@@ -199,10 +200,10 @@ static void __init request_standard_resources(void)
        struct memblock_region *region;
        struct resource *res;
 
-       kernel_code.start   = virt_to_phys(_text);
-       kernel_code.end     = virt_to_phys(__init_begin - 1);
-       kernel_data.start   = virt_to_phys(_sdata);
-       kernel_data.end     = virt_to_phys(_end - 1);
+       kernel_code.start   = __pa_symbol(_text);
+       kernel_code.end     = __pa_symbol(__init_begin - 1);
+       kernel_data.start   = __pa_symbol(_sdata);
+       kernel_data.end     = __pa_symbol(_end - 1);
 
        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
index 9a00eee9acc842993b9114b09f69998901a9cc06..93034651c87ea37517cc1db519ab4e097eee45d8 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/smp.h>
 #include <linux/types.h>
+#include <linux/mm.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
@@ -98,7 +99,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
         * boot-loader's endianess before jumping. This is mandated by
         * the boot protocol.
         */
-       writeq_relaxed(__pa(secondary_holding_pen), release_addr);
+       writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
        __flush_dcache_area((__force void *)release_addr,
                            sizeof(*release_addr));
 
index a2c2478e7d7896d568e7b9a04849f4adacb61fdf..41b6e31f8f556f4d4af8ecdafd3867e540a8ed10 100644 (file)
@@ -123,6 +123,7 @@ static int __init vdso_init(void)
 {
        int i;
        struct page **vdso_pagelist;
+       unsigned long pfn;
 
        if (memcmp(&vdso_start, "\177ELF", 4)) {
                pr_err("vDSO is not a valid ELF object!\n");
@@ -140,11 +141,14 @@ static int __init vdso_init(void)
                return -ENOMEM;
 
        /* Grab the vDSO data page. */
-       vdso_pagelist[0] = pfn_to_page(PHYS_PFN(__pa(vdso_data)));
+       vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
+
 
        /* Grab the vDSO code pages. */
+       pfn = sym_to_pfn(&vdso_start);
+
        for (i = 0; i < vdso_pages; i++)
-               vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i);
+               vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
 
        vdso_spec[0].pages = &vdso_pagelist[0];
        vdso_spec[1].pages = &vdso_pagelist[1];
index 716d1226ba6925babc28bc6c14dc175e739c7f57..8a2713018f2f9bd79fc878b518dbf73caf310a43 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/efi.h>
 #include <linux/swiotlb.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 
 #include <asm/boot.h>
 #include <asm/fixmap.h>
@@ -209,8 +210,8 @@ void __init arm64_memblock_init(void)
         * linear mapping. Take care not to clip the kernel which may be
         * high in memory.
         */
-       memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
-                       ULLONG_MAX);
+       memblock_remove(max_t(u64, memstart_addr + linear_region_size,
+                       __pa_symbol(_end)), ULLONG_MAX);
        if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
                /* ensure that memstart_addr remains sufficiently aligned */
                memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
@@ -225,7 +226,7 @@ void __init arm64_memblock_init(void)
         */
        if (memory_limit != (phys_addr_t)ULLONG_MAX) {
                memblock_mem_limit_remove_map(memory_limit);
-               memblock_add(__pa(_text), (u64)(_end - _text));
+               memblock_add(__pa_symbol(_text), (u64)(_end - _text));
        }
 
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
@@ -278,7 +279,7 @@ void __init arm64_memblock_init(void)
         * Register the kernel text, kernel data, initrd, and initial
         * pagetables with memblock.
         */
-       memblock_reserve(__pa(_text), _end - _text);
+       memblock_reserve(__pa_symbol(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                memblock_reserve(initrd_start, initrd_end - initrd_start);
@@ -484,7 +485,8 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-       free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
+       free_reserved_area(lm_alias(__init_begin),
+                          lm_alias(__init_end),
                           0, "unused kernel");
        /*
         * Unmap the __init region but leave the VM area in place. This
index 757009daa9ede454abccbcab6b3a0421a355b49c..201d918e75759d4938e62575083ede5b7775aff5 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/memblock.h>
 #include <linux/start_kernel.h>
+#include <linux/mm.h>
 
 #include <asm/mmu_context.h>
 #include <asm/kernel-pgtable.h>
@@ -26,6 +27,13 @@
 
 static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
 
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). All the early functions are called too
+ * early to use lm_alias so __p*d_populate functions must be used to populate
+ * with the physical address from __pa_symbol.
+ */
+
 static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
                                        unsigned long end)
 {
@@ -33,12 +41,12 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
        unsigned long next;
 
        if (pmd_none(*pmd))
-               pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+               __pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);
 
        pte = pte_offset_kimg(pmd, addr);
        do {
                next = addr + PAGE_SIZE;
-               set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
+               set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
                                        PAGE_KERNEL));
        } while (pte++, addr = next, addr != end && pte_none(*pte));
 }
@@ -51,7 +59,7 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
        unsigned long next;
 
        if (pud_none(*pud))
-               pud_populate(&init_mm, pud, kasan_zero_pmd);
+               __pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);
 
        pmd = pmd_offset_kimg(pud, addr);
        do {
@@ -68,7 +76,7 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
        unsigned long next;
 
        if (pgd_none(*pgd))
-               pgd_populate(&init_mm, pgd, kasan_zero_pud);
+               __pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);
 
        pud = pud_offset_kimg(pgd, addr);
        do {
@@ -148,7 +156,7 @@ void __init kasan_init(void)
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
        dsb(ishst);
-       cpu_replace_ttbr1(tmp_pg_dir);
+       cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
 
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
@@ -199,10 +207,10 @@ void __init kasan_init(void)
         */
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_zero_pte[i],
-                       pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+                       pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
 
        memset(kasan_zero_page, 0, PAGE_SIZE);
-       cpu_replace_ttbr1(swapper_pg_dir);
+       cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
index 17243e43184e9d34665bc3abb122724009881db4..a43415784e125611e59a441e6e1a95d162888b87 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/memblock.h>
 #include <linux/fs.h>
 #include <linux/io.h>
+#include <linux/mm.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -359,8 +360,8 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 
 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
-       unsigned long kernel_start = __pa(_text);
-       unsigned long kernel_end = __pa(__init_begin);
+       unsigned long kernel_start = __pa_symbol(_text);
+       unsigned long kernel_end = __pa_symbol(__init_begin);
 
        /*
         * Take care not to create a writable alias for the
@@ -427,14 +428,14 @@ void mark_rodata_ro(void)
        unsigned long section_size;
 
        section_size = (unsigned long)_etext - (unsigned long)_text;
-       create_mapping_late(__pa(_text), (unsigned long)_text,
+       create_mapping_late(__pa_symbol(_text), (unsigned long)_text,
                            section_size, PAGE_KERNEL_ROX);
        /*
         * mark .rodata as read only. Use __init_begin rather than __end_rodata
         * to cover NOTES and EXCEPTION_TABLE.
         */
        section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
-       create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
+       create_mapping_late(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
                            section_size, PAGE_KERNEL_RO);
 
        /* flush the TLBs after updating live kernel mappings */
@@ -446,7 +447,7 @@ void mark_rodata_ro(void)
 static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
                                      pgprot_t prot, struct vm_struct *vma)
 {
-       phys_addr_t pa_start = __pa(va_start);
+       phys_addr_t pa_start = __pa_symbol(va_start);
        unsigned long size = va_end - va_start;
 
        BUG_ON(!PAGE_ALIGNED(pa_start));
@@ -494,7 +495,7 @@ static void __init map_kernel(pgd_t *pgd)
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
-                       __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
+                       __pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
                pud_clear_fixmap();
        } else {
                BUG();
@@ -525,7 +526,7 @@ void __init paging_init(void)
         */
        cpu_replace_ttbr1(__va(pgd_phys));
        memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
-       cpu_replace_ttbr1(swapper_pg_dir);
+       cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
        pgd_clear_fixmap();
        memblock_free(pgd_phys, PAGE_SIZE);
@@ -534,7 +535,7 @@ void __init paging_init(void)
         * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
         * allocated with it.
         */
-       memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
+       memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
                      SWAPPER_DIR_SIZE - PAGE_SIZE);
 }
 
@@ -645,6 +646,12 @@ static inline pte_t * fixmap_pte(unsigned long addr)
        return &bm_pte[pte_index(addr)];
 }
 
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). This function is called too early to use
+ * lm_alias so __p*d_populate functions must be used to populate with the
+ * physical address from __pa_symbol.
+ */
 void __init early_fixmap_init(void)
 {
        pgd_t *pgd;
@@ -654,7 +661,7 @@ void __init early_fixmap_init(void)
 
        pgd = pgd_offset_k(addr);
        if (CONFIG_PGTABLE_LEVELS > 3 &&
-           !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
+           !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
                /*
                 * We only end up here if the kernel mapping and the fixmap
                 * share the top level pgd entry, which should only happen on
@@ -663,12 +670,14 @@ void __init early_fixmap_init(void)
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                pud = pud_offset_kimg(pgd, addr);
        } else {
-               pgd_populate(&init_mm, pgd, bm_pud);
+               if (pgd_none(*pgd))
+                       __pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
                pud = fixmap_pud(addr);
        }
-       pud_populate(&init_mm, pud, bm_pmd);
+       if (pud_none(*pud))
+               __pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
        pmd = fixmap_pmd(addr);
-       pmd_populate_kernel(&init_mm, pmd, bm_pte);
+       __pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
 
        /*
         * The boot-ioremap range spans multiple pmds, for which