PULS_20160108
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index fb3c446af9e5ea4ddd98c85bfbd36db135092f2f..54fc6e5c43703cb43354c0c4812dd1f0e790096c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -32,6 +32,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/pci.h>
+#include <mach/mtk_memcfg.h>
 
 #include "mm.h"
 #include "tcm.h"
@@ -605,11 +606,25 @@ static void __init *early_alloc(unsigned long sz)
        return early_alloc_aligned(sz, sz);
 }
 
-static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+static pte_t * __init early_pte_alloc(pmd_t *pmd)
+{
+       if (pmd_none(*pmd) || pmd_bad(*pmd))
+               return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+       return pmd_page_vaddr(*pmd);
+}
+
+static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
+{
+       __pmd_populate(pmd, __pa(pte), prot);
+       BUG_ON(pmd_bad(*pmd));
+}
+
+static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
+       unsigned long addr, unsigned long prot)
 {
        if (pmd_none(*pmd)) {
-               pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
-               __pmd_populate(pmd, __pa(pte), prot);
+               pte_t *pte = early_pte_alloc(pmd);
+               early_pte_install(pmd, pte, prot);
        }
        BUG_ON(pmd_bad(*pmd));
        return pte_offset_kernel(pmd, addr);
@@ -619,11 +634,17 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  const struct mem_type *type)
 {
-       pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+       pte_t *start_pte = early_pte_alloc(pmd);
+       pte_t *pte = start_pte + pte_index(addr);
+
+       /* If replacing a section mapping, the whole section must be replaced */
+       BUG_ON(!pmd_none(*pmd) && pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
+
        do {
                set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
+       early_pte_install(pmd, start_pte, type->prot_l1);
 }
 
 static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
@@ -655,7 +676,8 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
                                      unsigned long end, phys_addr_t phys,
-                                     const struct mem_type *type)
+                                     const struct mem_type *type,
+                                     bool force_pages)
 {
        pmd_t *pmd = pmd_offset(pud, addr);
        unsigned long next;
@@ -672,7 +694,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
                 * aligned to a section boundary.
                 */
                if (type->prot_sect &&
-                               ((addr | next | phys) & ~SECTION_MASK) == 0) {
+                               ((addr | next | phys) & ~SECTION_MASK) == 0 &&
+                               !force_pages) {
                        __map_init_section(pmd, addr, next, phys, type);
                } else {
                        alloc_init_pte(pmd, addr, next,
@@ -685,14 +708,15 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-       unsigned long end, unsigned long phys, const struct mem_type *type)
+       unsigned long end, unsigned long phys, const struct mem_type *type,
+       bool force_pages)
 {
        pud_t *pud = pud_offset(pgd, addr);
        unsigned long next;
 
        do {
                next = pud_addr_end(addr, end);
-               alloc_init_pmd(pud, addr, next, phys, type);
+               alloc_init_pmd(pud, addr, next, phys, type, force_pages);
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
 }
@@ -766,7 +790,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
  * offsets, and we take full advantage of sections and
  * supersections.
  */
-static void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md, bool force_pages)
 {
        unsigned long addr, length, end;
        phys_addr_t phys;
@@ -816,7 +840,7 @@ static void __init create_mapping(struct map_desc *md)
        do {
                unsigned long next = pgd_addr_end(addr, end);
 
-               alloc_init_pud(pgd, addr, next, phys, type);
+               alloc_init_pud(pgd, addr, next, phys, type, force_pages);
 
                phys += next - addr;
                addr = next;
@@ -838,7 +862,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
        for (md = io_desc; nr; md++, nr--) {
-               create_mapping(md);
+               create_mapping(md, false);
 
                vm = &svm->vm;
                vm->addr = (void *)(md->virtual & PAGE_MASK);
@@ -959,7 +983,7 @@ void __init debug_ll_io_init(void)
        map.virtual &= PAGE_MASK;
        map.length = PAGE_SIZE;
        map.type = MT_DEVICE;
-       create_mapping(&map);
+       create_mapping(&map, false);
 }
 #endif
 
@@ -1004,6 +1028,28 @@ void __init sanity_check_meminfo(void)
                struct membank *bank = &meminfo.bank[j];
                *bank = meminfo.bank[i];
 
+#ifdef CONFIG_SPARSEMEM
+               if (pfn_to_section_nr(bank_pfn_start(bank)) !=
+                   pfn_to_section_nr(bank_pfn_end(bank) - 1)) {
+                       phys_addr_t sz;
+                       unsigned long start_pfn = bank_pfn_start(bank);
+                       unsigned long end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
+                       sz = ((phys_addr_t)(end_pfn - start_pfn) << PAGE_SHIFT);
+
+                       if (meminfo.nr_banks >= NR_BANKS) {
+                               pr_crit("NR_BANKS too low, ignoring %lld bytes of memory\n",
+                                       (unsigned long long)(bank->size - sz));
+                       } else {
+                               memmove(bank + 1, bank,
+                                       (meminfo.nr_banks - i) * sizeof(*bank));
+                               meminfo.nr_banks++;
+                               bank[1].size -= sz;
+                               bank[1].start = __pfn_to_phys(end_pfn);
+                       }
+                       bank->size = sz;
+               }
+#endif
+
                if (bank->start > ULONG_MAX)
                        highmem = 1;
 
@@ -1201,7 +1247,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
        map.virtual = MODULES_VADDR;
        map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
        map.type = MT_ROM;
-       create_mapping(&map);
+       create_mapping(&map, false);
 #endif
 
        /*
@@ -1212,14 +1258,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
        map.virtual = FLUSH_BASE;
        map.length = SZ_1M;
        map.type = MT_CACHECLEAN;
-       create_mapping(&map);
+       create_mapping(&map, false);
 #endif
 #ifdef FLUSH_BASE_MINICACHE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
        map.virtual = FLUSH_BASE_MINICACHE;
        map.length = SZ_1M;
        map.type = MT_MINICLEAN;
-       create_mapping(&map);
+       create_mapping(&map, false);
 #endif
 
        /*
@@ -1235,13 +1281,13 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 #else
        map.type = MT_LOW_VECTORS;
 #endif
-       create_mapping(&map);
+       create_mapping(&map, false);
 
        if (!vectors_high()) {
                map.virtual = 0;
                map.length = PAGE_SIZE * 2;
                map.type = MT_LOW_VECTORS;
-               create_mapping(&map);
+               create_mapping(&map, false);
        }
 
        /* Now create a kernel read-only mapping */
@@ -1249,7 +1295,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
        map.virtual = 0xffff0000 + PAGE_SIZE;
        map.length = PAGE_SIZE;
        map.type = MT_LOW_VECTORS;
-       create_mapping(&map);
+       create_mapping(&map, false);
 
        /*
         * Ask the machine support to map in the statically mapped devices.
@@ -1274,33 +1320,62 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 static void __init kmap_init(void)
 {
 #ifdef CONFIG_HIGHMEM
-       pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
+       pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
                PKMAP_BASE, _PAGE_KERNEL_TABLE);
 #endif
 }
 
+
 static void __init map_lowmem(void)
 {
        struct memblock_region *reg;
+       phys_addr_t start;
+       phys_addr_t end;
+       phys_addr_t limit = 0;
+       struct map_desc map;
 
        /* Map all the lowmem memory banks. */
        for_each_memblock(memory, reg) {
-               phys_addr_t start = reg->base;
-               phys_addr_t end = start + reg->size;
-               struct map_desc map;
+               start = reg->base;
+               end = start + reg->size;
+                MTK_MEMCFG_LOG_AND_PRINTK(KERN_ALERT"[PHY layout]kernel   :   0x%08llx - 0x%08llx (0x%08llx)\n",
+                      (unsigned long long)start,
+                      (unsigned long long)end - 1,
+                      (unsigned long long)reg->size);
 
                if (end > arm_lowmem_limit)
                        end = arm_lowmem_limit;
                if (start >= end)
-                       break;
+                       continue;
 
                map.pfn = __phys_to_pfn(start);
                map.virtual = __phys_to_virt(start);
                map.length = end - start;
                map.type = MT_MEMORY;
 
-               create_mapping(&map);
+               if (!limit && !(end & ~SECTION_MASK)) {
+                       /* take first section-size aligned memblock */
+                       limit = end;
+                       memblock_set_current_limit(limit);
+               }
+                printk(KERN_ALERT"creating mapping start pa: 0x%08llx @ 0x%08llx "
+                        ", end pa: 0x%08llx @ 0x%08llx\n",
+                       (unsigned long long)start, (unsigned long long)map.virtual,
+                       (unsigned long long)end, (unsigned long long)__phys_to_virt(end));
+               create_mapping(&map, false);
        }
+
+#ifdef CONFIG_DEBUG_RODATA
+       start = __pa((unsigned long)_stext & PMD_MASK);
+       end =  __pa(ALIGN((unsigned long)__end_rodata, PMD_SIZE));
+
+       map.pfn = __phys_to_pfn(start);
+       map.virtual = __phys_to_virt(start);
+       map.length = end - start;
+       map.type = MT_MEMORY;
+
+       create_mapping(&map, true);
+#endif
 }
 
 /*