Booting current 64-bit x86 kernels on the latest Apple MacBook
(MacBook5,2) via EFI gives the following warning:
[ 0.182209] ------------[ cut here ]------------
[ 0.182222] WARNING: at arch/x86/mm/pageattr.c:581 __cpa_process_fault+0x44/0xa0()
[ 0.182227] Hardware name: MacBook5,2
[ 0.182231] CPA: called for zero pte. vaddr = ffff8800ffe00000 cpa->vaddr = ffff8800ffe00000
[ 0.182236] Modules linked in:
[ 0.182242] Pid: 0, comm: swapper Not tainted 2.6.31-rc4 #6
[ 0.182246] Call Trace:
[ 0.182254] [<ffffffff8102c754>] ? __cpa_process_fault+0x44/0xa0
[ 0.182261] [<ffffffff81048668>] warn_slowpath_common+0x78/0xd0
[ 0.182266] [<ffffffff81048744>] warn_slowpath_fmt+0x64/0x70
[ 0.182272] [<ffffffff8102c7ec>] ? update_page_count+0x3c/0x50
[ 0.182280] [<ffffffff818d25c5>] ? phys_pmd_init+0x140/0x22e
[ 0.182286] [<ffffffff8102c754>] __cpa_process_fault+0x44/0xa0
[ 0.182292] [<ffffffff8102ce60>] __change_page_attr_set_clr+0x5f0/0xb40
[ 0.182301] [<ffffffff810d1035>] ? vm_unmap_aliases+0x175/0x190
[ 0.182307] [<ffffffff8102d4ae>] change_page_attr_set_clr+0xfe/0x3d0
[ 0.182314] [<ffffffff8102dcca>] _set_memory_uc+0x2a/0x30
[ 0.182319] [<ffffffff8102dd4b>] set_memory_uc+0x7b/0xb0
[ 0.182327] [<ffffffff818afe31>] efi_enter_virtual_mode+0x2ad/0x2c9
[ 0.182334] [<ffffffff818a1c66>] start_kernel+0x2db/0x3f4
[ 0.182340] [<ffffffff818a1289>] x86_64_start_reservations+0x99/0xb9
[ 0.182345] [<ffffffff818a1389>] x86_64_start_kernel+0xe0/0xf2
[ 0.182357] ---[ end trace 4eaa2a86a8e2da22 ]---
[ 0.182982] init_memory_mapping: 00000000ffffc000-0000000100000000
[ 0.182993] 00ffffc000 - 0100000000 page 4k
This happens because the 64-bit version of efi_ioremap calls
init_memory_mapping for all addresses, regardless of whether they are
RAM or MMIO. The EFI tables on this machine ask for runtime access to
some MMIO regions:
[ 0.000000] EFI: mem195: type=11, attr=0x8000000000000000, range=[0x0000000093400000-0x0000000093401000) (0MB)
[ 0.000000] EFI: mem196: type=11, attr=0x8000000000000000, range=[0x00000000ffc00000-0x00000000ffc40000) (0MB)
[ 0.000000] EFI: mem197: type=11, attr=0x8000000000000000, range=[0x00000000ffc40000-0x00000000ffc80000) (0MB)
[ 0.000000] EFI: mem198: type=11, attr=0x8000000000000000, range=[0x00000000ffc80000-0x00000000ffca4000) (0MB)
[ 0.000000] EFI: mem199: type=11, attr=0x8000000000000000, range=[0x00000000ffca4000-0x00000000ffcb4000) (0MB)
[ 0.000000] EFI: mem200: type=11, attr=0x8000000000000000, range=[0x00000000ffcb4000-0x00000000ffffc000) (3MB)
[ 0.000000] EFI: mem201: type=11, attr=0x8000000000000000, range=[0x00000000ffffc000-0x0000000100000000) (0MB)
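Type 11 in these descriptors is EFI_MEMORY_MAPPED_IO. As a rough illustration only (not part of this patch), the loop below, modelled on the descriptor walk in efi_enter_virtual_mode and assuming the 2.6.31-era global memmap from arch/x86/kernel/efi.c, would pick out exactly the regions listed above:

#include <linux/efi.h>
#include <linux/kernel.h>

/*
 * Sketch only: walk the firmware-provided memory map the same way
 * efi_enter_virtual_mode() does and report descriptors whose type is
 * EFI_MEMORY_MAPPED_IO (type 11 in the log above).  Assumes the
 * global 'memmap' kept by the 2.6.31-era arch/x86/kernel/efi.c.
 */
static void __init show_efi_mmio(void)
{
	efi_memory_desc_t *md;
	void *p;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (md->type != EFI_MEMORY_MAPPED_IO)
			continue;
		printk(KERN_INFO "EFI MMIO: 0x%llx-0x%llx\n",
		       md->phys_addr,
		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT));
	}
}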
This patch passes the EFI memory type through to efi_ioremap and makes
efi_ioremap use ioremap rather than init_memory_mapping when the type is
EFI_MEMORY_MAPPED_IO. An MMIO region must not be added to the kernel's
direct RAM mapping; ioremap instead gives it an uncached mapping in the
vmalloc area, which is what device registers need. With this, the above
warning goes away.
Signed-off-by: Paul Mackerras <paulus@samba.org>
LKML-Reference: <19062.55858.533494.471153@cargo.ozlabs.ibm.com>
Cc: Huang Ying <ying.huang@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
efi_call_virt(f, a1, a2, a3, a4, a5, a6)
-#define efi_ioremap(addr, size) ioremap_cache(addr, size)
+#define efi_ioremap(addr, size, type) ioremap_cache(addr, size)
#else /* !CONFIG_X86_32 */
efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
-extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
+ u32 type);
#endif /* CONFIG_X86_32 */
&& end_pfn <= max_pfn_mapped))
va = __va(md->phys_addr);
else
- va = efi_ioremap(md->phys_addr, size);
+ va = efi_ioremap(md->phys_addr, size, md->type);
md->virt_addr = (u64) (unsigned long) va;
early_runtime_code_mapping_set_exec(0);
}
-void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
+void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
+ u32 type)
{
unsigned long last_map_pfn;
+ if (type == EFI_MEMORY_MAPPED_IO)
+ return ioremap(phys_addr, size);
+
last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
return NULL;
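For reference, the post-patch 64-bit efi_ioremap as a whole would read roughly as follows. The hunk above is truncated, so the final return statement is assumed from the surrounding code rather than quoted:

void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type)
{
	unsigned long last_map_pfn;

	/* MMIO must not be treated as RAM: map it uncached via ioremap. */
	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	/* RAM-backed runtime regions go into the direct mapping. */
	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
		return NULL;

	/* Assumed from context: return the direct-map address. */
	return (void __iomem *)__va(phys_addr);
}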