x86, mm: Unify kernel_physical_mapping_init() API
author	Pekka Enberg <penberg@cs.helsinki.fi>
Wed, 24 Feb 2010 15:04:47 +0000 (17:04 +0200)
committer	H. Peter Anvin <hpa@zytor.com>
Thu, 25 Feb 2010 23:15:21 +0000 (15:15 -0800)
This patch changes the 32-bit version of kernel_physical_mapping_init() to
return the last mapped address, as the 64-bit version already does, so that
the call-site in init_memory_mapping() can be unified.
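
To illustrate the convention this patch settles on, here is a minimal,
self-contained user-space sketch (not kernel code; the map_range struct and
the stub function below are simplified stand-ins for the real kernel
structures and routine) showing how a single loop can simply keep the return
value of kernel_physical_mapping_init() from the last range:

	#include <stdio.h>

	struct map_range {
		unsigned long start;
		unsigned long end;
		unsigned long page_size_mask;
	};

	/* Stand-in for the real function: pretend the whole range was mapped. */
	static unsigned long kernel_physical_mapping_init(unsigned long start,
							  unsigned long end,
							  unsigned long page_size_mask)
	{
		(void)start;
		(void)page_size_mask;
		return end;		/* last mapped address for this range */
	}

	int main(void)
	{
		struct map_range mr[] = {
			{ 0x0,      0x200000, 0 },
			{ 0x200000, 0x800000, 1 },
		};
		unsigned long ret = 0;
		int i;

		/* One loop for every range; no 32-bit/64-bit special case needed. */
		for (i = 0; i < 2; i++)
			ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
							   mr[i].page_size_mask);

		printf("last mapped address: %#lx\n", ret);
		return 0;
	}

With both the 32-bit and 64-bit implementations returning the last mapped
address, the #ifdef'd 32-bit branch in init_memory_mapping() becomes
unnecessary, as the hunk in arch/x86/mm/init.c below shows.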

Cc: Yinghai Lu <yinghai@kernel.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
LKML-Reference: <alpine.DEB.2.00.1002241703570.1180@melkki.cs.helsinki.fi>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
arch/x86/mm/init.c
arch/x86/mm/init_32.c

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index d406c5239019ee0e2e9609c826f1a2ba8564c693..e71c5cbc8f3561f6ce701582377749155af587b8 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -266,16 +266,9 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
        if (!after_bootmem)
                find_early_table_space(end, use_pse, use_gbpages);
 
-#ifdef CONFIG_X86_32
-       for (i = 0; i < nr_range; i++)
-               kernel_physical_mapping_init(mr[i].start, mr[i].end,
-                                            mr[i].page_size_mask);
-       ret = end;
-#else /* CONFIG_X86_64 */
        for (i = 0; i < nr_range; i++)
                ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
                                                   mr[i].page_size_mask);
-#endif
 
 #ifdef CONFIG_X86_32
        early_ioremap_page_table_range_init();
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 9a0c258a86be6b023b59d7880e47e7c38290dfac..2226f2c70ea3ffc090b670c06e7eddbdd8fc84b5 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -241,6 +241,7 @@ kernel_physical_mapping_init(unsigned long start,
                             unsigned long page_size_mask)
 {
        int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
+       unsigned long last_map_addr = end;
        unsigned long start_pfn, end_pfn;
        pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
@@ -341,9 +342,10 @@ repeat:
                                        prot = PAGE_KERNEL_EXEC;
 
                                pages_4k++;
-                               if (mapping_iter == 1)
+                               if (mapping_iter == 1) {
                                        set_pte(pte, pfn_pte(pfn, init_prot));
-                               else
+                                       last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
+                               } else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
@@ -368,7 +370,7 @@ repeat:
                mapping_iter = 2;
                goto repeat;
        }
-       return 0;
+       return last_map_addr;
 }
 
 pte_t *kmap_pte;