memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
- efi.systab = efi_early_ioremap((unsigned long)efi_phys.systab,
- sizeof(efi_system_table_t));
+ efi.systab = early_ioremap((unsigned long)efi_phys.systab,
+ sizeof(efi_system_table_t));
if (efi.systab == NULL)
printk(KERN_ERR "Couldn't map the EFI system table!\n");
memcpy(&efi_systab, efi.systab, sizeof(efi_system_table_t));
- efi_early_iounmap(efi.systab, sizeof(efi_system_table_t));
+ early_iounmap(efi.systab, sizeof(efi_system_table_t));
efi.systab = &efi_systab;
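/*
 * The system table is copied into efi_systab and the temporary mapping is
 * dropped immediately, since only a small number of boot-time fixmap
 * slots are available for early_ioremap().
 */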
/*
* Show what we know for posterity
*/
- c16 = tmp = efi_early_ioremap(efi.systab->fw_vendor, 2);
+ c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
if (c16) {
for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = *c16++;
vendor[i] = '\0';
} else
printk(KERN_ERR PFX "Could not map the firmware vendor!\n");
- efi_early_iounmap(tmp, 2);
+ early_iounmap(tmp, 2);
printk(KERN_INFO "EFI v%u.%.02u by %s \n",
efi.systab->hdr.revision >> 16,
/*
* Let's see what config tables the firmware passed to us.
*/
- config_tables = efi_early_ioremap(
+ config_tables = early_ioremap(
efi.systab->tables,
efi.systab->nr_tables * sizeof(efi_config_table_t));
if (config_tables == NULL)
}
}
printk("\n");
- efi_early_iounmap(config_tables,
+ early_iounmap(config_tables,
efi.systab->nr_tables * sizeof(efi_config_table_t));
/*
* address of several of the EFI runtime functions, needed to
* set the firmware into virtual mode.
*/
- runtime = efi_early_ioremap((unsigned long)efi.systab->runtime,
- sizeof(efi_runtime_services_t));
+ runtime = early_ioremap((unsigned long)efi.systab->runtime,
+ sizeof(efi_runtime_services_t));
if (runtime != NULL) {
/*
* We will only need *early* access to the following
} else
printk(KERN_ERR "Could not map the EFI runtime service "
"table!\n");
- efi_early_iounmap(runtime, sizeof(efi_runtime_services_t));
+ early_iounmap(runtime, sizeof(efi_runtime_services_t));
/* Map the EFI memory map */
- memmap.map = efi_early_ioremap((unsigned long)memmap.phys_map,
- memmap.nr_map * memmap.desc_size);
+ memmap.map = early_ioremap((unsigned long)memmap.phys_map,
+ memmap.nr_map * memmap.desc_size);
if (memmap.map == NULL)
printk(KERN_ERR "Could not map the EFI memory map!\n");
memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
{
memmap.map = NULL;
- memmap.map = bt_ioremap((unsigned long) memmap.phys_map,
+ memmap.map = early_ioremap((unsigned long) memmap.phys_map,
(memmap.nr_map * memmap.desc_size));
if (memmap.map == NULL)
printk(KERN_ERR "Could not remap the EFI memmap!\n");
if (clen > MAX_MAP_CHUNK-slop)
clen = MAX_MAP_CHUNK-slop;
mapaddr = ramdisk_image & PAGE_MASK;
- p = bt_ioremap(mapaddr, clen+slop);
+ p = early_ioremap(mapaddr, clen+slop);
memcpy(q, p+slop, clen);
- bt_iounmap(p, clen+slop);
+ early_iounmap(p, clen+slop);
q += clen;
ramdisk_image += clen;
ramdisk_size -= clen;
memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
pre_setup_arch_hook();
early_cpu_init();
- bt_ioremap_init();
+ early_ioremap_init();
#ifdef CONFIG_EFI
if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
}
rsdt = (struct acpi_table_rsdt *)
- bt_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
+ early_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
if (!rsdt) {
printk(KERN_WARNING
for (i = 0; i < tables; i++) {
/* Map in header, then map in full table length. */
header = (struct acpi_table_header *)
- bt_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
+ early_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
if (!header)
break;
header = (struct acpi_table_header *)
- bt_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
+ early_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
if (!header)
break;
* Fixed mappings, only the page table structure has to be
* created - mappings will be set by set_fixmap():
*/
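/*
 * Tear down the temporary early_ioremap() pgd entry first, so that
 * page_table_range_init() allocates a permanent pte page for the fixmap
 * range; early_ioremap_reset() below then re-installs any still-live
 * boot-time mappings into it.
 */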
- bt_ioremap_clear();
+ early_ioremap_clear();
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
page_table_range_init(vaddr, end, pgd_base);
- bt_ioremap_reset();
+ early_ioremap_reset();
permanent_kmaps_init(pgd_base);
static __initdata unsigned long bm_pte[1024]
__attribute__((aligned(PAGE_SIZE)));
-static inline unsigned long * __init bt_ioremap_pgd(unsigned long addr)
+static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}
-static inline unsigned long * __init bt_ioremap_pte(unsigned long addr)
+static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}
-void __init bt_ioremap_init(void)
+void __init early_ioremap_init(void)
{
unsigned long *pgd;
- pgd = bt_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+ pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
*pgd = __pa(bm_pte) | _PAGE_TABLE;
memset(bm_pte, 0, sizeof(bm_pte));
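/*
 * All boot-time slots must fit in the single pte page bm_pte, i.e.
 * FIX_BTMAP_BEGIN and FIX_BTMAP_END have to share one page-directory
 * entry.
 */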
- BUG_ON(pgd != bt_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
+ BUG_ON(pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
}
-void __init bt_ioremap_clear(void)
+void __init early_ioremap_clear(void)
{
unsigned long *pgd;
- pgd = bt_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+ pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
*pgd = 0;
__flush_tlb_all();
}
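/*
 * Called once the kernel page tables are set up: any boot-time mapping
 * that is still present is carried over into the permanent fixmap via
 * set_fixmap(), so pointers handed out by early_ioremap() remain valid
 * across paging_init().
 */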
-void __init bt_ioremap_reset(void)
+void __init early_ioremap_reset(void)
{
enum fixed_addresses idx;
unsigned long *pte, phys, addr;
after_paging_init = 1;
for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
addr = fix_to_virt(idx);
- pte = bt_ioremap_pte(addr);
+ pte = early_ioremap_pte(addr);
if (*pte & _PAGE_PRESENT) {
phys = *pte & PAGE_MASK;
set_fixmap(idx, phys);
}
}
-static void __init __bt_set_fixmap(enum fixed_addresses idx,
+static void __init __early_set_fixmap(enum fixed_addresses idx,
unsigned long phys, pgprot_t flags)
{
unsigned long *pte, addr = __fix_to_virt(idx);
BUG();
return;
}
- pte = bt_ioremap_pte(addr);
+ pte = early_ioremap_pte(addr);
if (pgprot_val(flags))
*pte = (phys & PAGE_MASK) | pgprot_val(flags);
else
*pte = 0;
__flush_tlb_one(addr);
}
-static inline void __init bt_set_fixmap(enum fixed_addresses idx,
+static inline void __init early_set_fixmap(enum fixed_addresses idx,
unsigned long phys)
{
if (after_paging_init)
set_fixmap(idx, phys);
else
- __bt_set_fixmap(idx, phys, PAGE_KERNEL);
+ __early_set_fixmap(idx, phys, PAGE_KERNEL);
}
-static inline void __init bt_clear_fixmap(enum fixed_addresses idx)
+static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
if (after_paging_init)
clear_fixmap(idx);
else
- __bt_set_fixmap(idx, 0, __pgprot(0));
+ __early_set_fixmap(idx, 0, __pgprot(0));
}
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
unsigned long offset, last_addr;
unsigned int nrpages;
*/
idx = FIX_BTMAP_BEGIN;
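/*
 * Fixmap indices grow towards lower virtual addresses, so idx is
 * decremented as phys_addr advances; this produces a virtually
 * contiguous mapping starting at fix_to_virt(FIX_BTMAP_BEGIN).
 */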
while (nrpages > 0) {
- bt_set_fixmap(idx, phys_addr);
+ early_set_fixmap(idx, phys_addr);
phys_addr += PAGE_SIZE;
--idx;
--nrpages;
return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
-void __init bt_iounmap(void *addr, unsigned long size)
+void __init early_iounmap(void *addr, unsigned long size)
{
unsigned long virt_addr;
unsigned long offset;
idx = FIX_BTMAP_BEGIN;
while (nrpages > 0) {
- bt_clear_fixmap(idx);
+ early_clear_fixmap(idx);
--idx;
--nrpages;
}
#ifdef CONFIG_X86_32
-/* Use early IO mappings for DMI because it's initialized early */
-#define dmi_ioremap bt_ioremap
-#define dmi_iounmap bt_iounmap
#define dmi_alloc alloc_bootmem
#else /* CONFIG_X86_32 */
return dmi_alloc_data + idx;
}
+#endif
+
#define dmi_ioremap early_ioremap
#define dmi_iounmap early_iounmap
#endif
-
-#endif
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
efi_call_virt(f, a1, a2, a3, a4, a5, a6)
-#define efi_early_ioremap(addr, size) bt_ioremap(addr, size)
-#define efi_early_iounmap(vaddr, size) bt_iounmap(vaddr, size)
-
#define efi_ioremap(addr, size) ioremap(addr, size)
#define end_pfn_map max_low_pfn
efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
-#define efi_early_ioremap(addr, size) early_ioremap(addr, size)
-#define efi_early_iounmap(vaddr, size) early_iounmap(vaddr, size)
-
extern void *efi_ioremap(unsigned long offset, unsigned long size);
-extern int efi_time;
-
#endif /* CONFIG_X86_32 */
extern void efi_reserve_bootmem(void);
extern void iounmap(volatile void __iomem *addr);
/*
- * bt_ioremap() and bt_iounmap() are for temporary early boot-time
+ * early_ioremap() and early_iounmap() are for temporary early boot-time
* mappings, before the real ioremap() is functional.
* A boot-time mapping is currently limited to at most 16 pages.
*/
-extern void bt_ioremap_init(void);
-extern void bt_ioremap_clear(void);
-extern void bt_ioremap_reset(void);
-extern void *bt_ioremap(unsigned long offset, unsigned long size);
-extern void bt_iounmap(void *addr, unsigned long size);
+extern void early_ioremap_init(void);
+extern void early_ioremap_clear(void);
+extern void early_ioremap_reset(void);
+extern void *early_ioremap(unsigned long offset, unsigned long size);
+extern void early_iounmap(void *addr, unsigned long size);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
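/*
 * Illustrative usage sketch (copy_firmware_blob, phys and buf are
 * placeholder names, not part of this patch); the pattern mirrors the
 * EFI and DMI users: map, copy or parse, then unmap so the slot can be
 * reused.
 *
 *	static char buf[64] __initdata;
 *
 *	static void __init copy_firmware_blob(unsigned long phys)
 *	{
 *		void *p = early_ioremap(phys, sizeof(buf));
 *
 *		if (p == NULL)
 *			return;
 *		memcpy(buf, p, sizeof(buf));
 *		early_iounmap(p, sizeof(buf));
 *	}
 */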
/* Use early IO mappings for DMI because it's initialized early */
-#define dmi_ioremap bt_ioremap
-#define dmi_iounmap bt_iounmap
+#define dmi_ioremap early_ioremap
+#define dmi_iounmap early_iounmap
#define dmi_alloc alloc_bootmem
/*