X-Git-Url: https://git.stricted.de/?p=GitHub%2Fmt8127%2Fandroid_kernel_alcatel_ttab.git;a=blobdiff_plain;f=arch%2Farm%2Fmm%2Fmmu.c;h=54fc6e5c43703cb43354c0c4812dd1f0e790096c;hp=e0d8565671a6c8104c643937098b963ac2f517ee;hb=6fa3eb70c07b7ce2061fd6602159ac2d45a7dc3d;hpb=c583eac39abfd45d1985f60c5c1727e585fadbdd diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e0d8565671a6..54fc6e5c4370 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "mm.h" #include "tcm.h" @@ -458,6 +459,16 @@ static void __init build_mem_type_table(void) s2_pgprot = cp->pte_s2; hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte; + /* + * We don't use domains on ARMv6 (since this causes problems with + * v6/v7 kernels), so we must use a separate memory type for user + * r/o, kernel r/w to map the vectors page. + */ +#ifndef CONFIG_ARM_LPAE + if (cpu_arch == CPU_ARCH_ARMv6) + vecs_pgprot |= L_PTE_MT_VECTORS; +#endif + /* * ARMv6 and above have extended page tables. */ @@ -595,11 +606,25 @@ static void __init *early_alloc(unsigned long sz) return early_alloc_aligned(sz, sz); } -static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) +static pte_t * __init early_pte_alloc(pmd_t *pmd) +{ + if (pmd_none(*pmd) || pmd_bad(*pmd)) + return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE); + return pmd_page_vaddr(*pmd); +} + +static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot) +{ + __pmd_populate(pmd, __pa(pte), prot); + BUG_ON(pmd_bad(*pmd)); +} + +static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd, + unsigned long addr, unsigned long prot) { if (pmd_none(*pmd)) { - pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE); - __pmd_populate(pmd, __pa(pte), prot); + pte_t *pte = early_pte_alloc(pmd); + early_pte_install(pmd, pte, prot); } BUG_ON(pmd_bad(*pmd)); return pte_offset_kernel(pmd, addr); @@ -609,17 +634,25 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, const struct mem_type *type) { - pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1); + pte_t *start_pte = early_pte_alloc(pmd); + pte_t *pte = start_pte + pte_index(addr); + + /* If replacing a section mapping, the whole section must be replaced */ + BUG_ON(!pmd_none(*pmd) && pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK)); + do { set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); + early_pte_install(pmd, start_pte, type->prot_l1); } -static void __init map_init_section(pmd_t *pmd, unsigned long addr, +static void __init __map_init_section(pmd_t *pmd, unsigned long addr, unsigned long end, phys_addr_t phys, const struct mem_type *type) { + pmd_t *p = pmd; + #ifndef CONFIG_ARM_LPAE /* * In classic MMU format, puds and pmds are folded in to @@ -638,12 +671,13 @@ static void __init map_init_section(pmd_t *pmd, unsigned long addr, phys += SECTION_SIZE; } while (pmd++, addr += SECTION_SIZE, addr != end); - flush_pmd_entry(pmd); + flush_pmd_entry(p); } static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys, - const struct mem_type *type) + const struct mem_type *type, + bool force_pages) { pmd_t *pmd = pmd_offset(pud, addr); unsigned long next; @@ -660,8 +694,9 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, * aligned to a section boundary. 
*/ if (type->prot_sect && - ((addr | next | phys) & ~SECTION_MASK) == 0) { - map_init_section(pmd, addr, next, phys, type); + ((addr | next | phys) & ~SECTION_MASK) == 0 && + !force_pages) { + __map_init_section(pmd, addr, next, phys, type); } else { alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), type); @@ -673,14 +708,15 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, } static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, - unsigned long end, unsigned long phys, const struct mem_type *type) + unsigned long end, unsigned long phys, const struct mem_type *type, + bool force_pages) { pud_t *pud = pud_offset(pgd, addr); unsigned long next; do { next = pud_addr_end(addr, end); - alloc_init_pmd(pud, addr, next, phys, type); + alloc_init_pmd(pud, addr, next, phys, type, force_pages); phys += next - addr; } while (pud++, addr = next, addr != end); } @@ -754,7 +790,7 @@ static void __init create_36bit_mapping(struct map_desc *md, * offsets, and we take full advantage of sections and * supersections. */ -static void __init create_mapping(struct map_desc *md) +static void __init create_mapping(struct map_desc *md, bool force_pages) { unsigned long addr, length, end; phys_addr_t phys; @@ -804,7 +840,7 @@ static void __init create_mapping(struct map_desc *md) do { unsigned long next = pgd_addr_end(addr, end); - alloc_init_pud(pgd, addr, next, phys, type); + alloc_init_pud(pgd, addr, next, phys, type, force_pages); phys += next - addr; addr = next; @@ -826,7 +862,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr) svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm)); for (md = io_desc; nr; md++, nr--) { - create_mapping(md); + create_mapping(md, false); vm = &svm->vm; vm->addr = (void *)(md->virtual & PAGE_MASK); @@ -947,7 +983,7 @@ void __init debug_ll_io_init(void) map.virtual &= PAGE_MASK; map.length = PAGE_SIZE; map.type = MT_DEVICE; - create_mapping(&map); + create_mapping(&map, false); } #endif @@ -992,6 +1028,28 @@ void __init sanity_check_meminfo(void) struct membank *bank = &meminfo.bank[j]; *bank = meminfo.bank[i]; +#ifdef CONFIG_SPARSEMEM + if (pfn_to_section_nr(bank_pfn_start(bank)) != + pfn_to_section_nr(bank_pfn_end(bank) - 1)) { + phys_addr_t sz; + unsigned long start_pfn = bank_pfn_start(bank); + unsigned long end_pfn = SECTION_ALIGN_UP(start_pfn + 1); + sz = ((phys_addr_t)(end_pfn - start_pfn) << PAGE_SHIFT); + + if (meminfo.nr_banks >= NR_BANKS) { + pr_crit("NR_BANKS too low, ignoring %lld bytes of memory\n", + (unsigned long long)(bank->size - sz)); + } else { + memmove(bank + 1, bank, + (meminfo.nr_banks - i) * sizeof(*bank)); + meminfo.nr_banks++; + bank[1].size -= sz; + bank[1].start = __pfn_to_phys(end_pfn); + } + bank->size = sz; + } +#endif + if (bank->start > ULONG_MAX) highmem = 1; @@ -1173,7 +1231,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) /* * Allocate the vector page early. 
*/ - vectors = early_alloc(PAGE_SIZE); + vectors = early_alloc(PAGE_SIZE * 2); early_trap_init(vectors); @@ -1189,7 +1247,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) map.virtual = MODULES_VADDR; map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; map.type = MT_ROM; - create_mapping(&map); + create_mapping(&map, false); #endif /* @@ -1200,14 +1258,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc) map.virtual = FLUSH_BASE; map.length = SZ_1M; map.type = MT_CACHECLEAN; - create_mapping(&map); + create_mapping(&map, false); #endif #ifdef FLUSH_BASE_MINICACHE map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); map.virtual = FLUSH_BASE_MINICACHE; map.length = SZ_1M; map.type = MT_MINICLEAN; - create_mapping(&map); + create_mapping(&map, false); #endif /* @@ -1218,15 +1276,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc) map.pfn = __phys_to_pfn(virt_to_phys(vectors)); map.virtual = 0xffff0000; map.length = PAGE_SIZE; +#ifdef CONFIG_KUSER_HELPERS map.type = MT_HIGH_VECTORS; - create_mapping(&map); +#else + map.type = MT_LOW_VECTORS; +#endif + create_mapping(&map, false); if (!vectors_high()) { map.virtual = 0; + map.length = PAGE_SIZE * 2; map.type = MT_LOW_VECTORS; - create_mapping(&map); + create_mapping(&map, false); } + /* Now create a kernel read-only mapping */ + map.pfn += 1; + map.virtual = 0xffff0000 + PAGE_SIZE; + map.length = PAGE_SIZE; + map.type = MT_LOW_VECTORS; + create_mapping(&map, false); + /* * Ask the machine support to map in the statically mapped devices. */ @@ -1250,33 +1320,62 @@ static void __init devicemaps_init(struct machine_desc *mdesc) static void __init kmap_init(void) { #ifdef CONFIG_HIGHMEM - pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), + pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE), PKMAP_BASE, _PAGE_KERNEL_TABLE); #endif } + static void __init map_lowmem(void) { struct memblock_region *reg; + phys_addr_t start; + phys_addr_t end; + phys_addr_t limit = 0; + struct map_desc map; /* Map all the lowmem memory banks. */ for_each_memblock(memory, reg) { - phys_addr_t start = reg->base; - phys_addr_t end = start + reg->size; - struct map_desc map; + start = reg->base; + end = start + reg->size; + MTK_MEMCFG_LOG_AND_PRINTK(KERN_ALERT"[PHY layout]kernel : 0x%08llx - 0x%08llx (0x%08llx)\n", + (unsigned long long)start, + (unsigned long long)end - 1, + (unsigned long long)reg->size); if (end > arm_lowmem_limit) end = arm_lowmem_limit; if (start >= end) - break; + continue; map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); map.length = end - start; map.type = MT_MEMORY; - create_mapping(&map); + if (!limit && !(end & ~SECTION_MASK)) { + /* take first section-size aligned memblock */ + limit = end; + memblock_set_current_limit(limit); + } + printk(KERN_ALERT"creating mapping start pa: 0x%08llx @ 0x%08llx " + ", end pa: 0x%08llx @ 0x%08llx\n", + (unsigned long long)start, (unsigned long long)map.virtual, + (unsigned long long)end, (unsigned long long)__phys_to_virt(end)); + create_mapping(&map, false); } + +#ifdef CONFIG_DEBUG_RODATA + start = __pa((unsigned long)_stext & PMD_MASK); + end = __pa(ALIGN((unsigned long)__end_rodata, PMD_SIZE)); + + map.pfn = __phys_to_pfn(start); + map.virtual = __phys_to_virt(start); + map.length = end - start; + map.type = MT_MEMORY; + + create_mapping(&map, true); +#endif } /*
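
Note (not part of the patch): create_mapping() now takes a force_pages argument that is threaded down through alloc_init_pud() and alloc_init_pmd(). When it is true, alloc_init_pmd() skips the section mapping even for spans where addr, next and phys are all section aligned, and falls back to alloc_init_pte(). map_lowmem() relies on this when, under CONFIG_DEBUG_RODATA, it remaps the PMD-aligned _stext.._end_rodata region with force_pages=true, so that protections on the kernel text and rodata can later be changed at page granularity. The standalone sketch below models only the alignment check from alloc_init_pmd(); the SECTION_* constants and the can_use_section() helper are local to the example and are not kernel symbols.

/*
 * Simplified, standalone model of the section-vs-page decision made in
 * alloc_init_pmd() in this patch.  Not kernel code: SECTION_SHIFT/SIZE/MASK
 * are redefined locally, assuming classic (non-LPAE) 1 MiB sections.
 */
#include <stdbool.h>
#include <stdio.h>

#define SECTION_SHIFT   20UL
#define SECTION_SIZE    (1UL << SECTION_SHIFT)
#define SECTION_MASK    (~(SECTION_SIZE - 1))

/* Returns true when a section mapping may be used for this span. */
static bool can_use_section(unsigned long addr, unsigned long next,
			    unsigned long phys, bool force_pages)
{
	/*
	 * All three addresses must be section aligned, and the caller must
	 * not have asked for page granularity (force_pages), mirroring the
	 * condition added to alloc_init_pmd().
	 */
	return ((addr | next | phys) & ~SECTION_MASK) == 0 && !force_pages;
}

int main(void)
{
	/* Aligned span: section mapping allowed. */
	printf("%d\n", can_use_section(0xc0000000, 0xc0100000, 0x80000000, false));
	/* Same span with force_pages set (e.g. the DEBUG_RODATA remap): pages. */
	printf("%d\n", can_use_section(0xc0000000, 0xc0100000, 0x80000000, true));
	/* Unaligned physical address: must fall back to pte mappings. */
	printf("%d\n", can_use_section(0xc0000000, 0xc0100000, 0x80080000, false));
	return 0;
}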
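
Note (not part of the patch): the patch also splits early_pte_alloc() so that alloc_init_pte() first obtains (or reuses) the pte table, fills in all of its entries, and only then calls early_pte_install() to populate the pmd; previously the empty table was installed into the pmd before any pte was written. The sketch below is a minimal user-space model of that allocate-fill-install ordering; the model_* names, the PTRS_PER_PTE value and the simplified pmd_t are assumptions made for illustration and do not match the kernel's real types.

/*
 * Standalone model (not kernel code) of the allocate-fill-install ordering
 * introduced in alloc_init_pte(): the pte table is fully populated before
 * model_pte_install() publishes it, so a partially filled table is never
 * reachable through the pmd.
 */
#include <stdio.h>
#include <stdlib.h>

#define PTRS_PER_PTE 512		/* illustrative size only */

typedef unsigned long pte_t;
typedef struct { pte_t *table; } pmd_t;	/* simplified: pmd just points at a pte table */

/* Model of early_pte_alloc(): reuse an existing table or allocate a zeroed one. */
static pte_t *model_pte_alloc(pmd_t *pmd)
{
	if (pmd->table)
		return pmd->table;
	return calloc(PTRS_PER_PTE, sizeof(pte_t));
}

/* Model of early_pte_install(): only now does the pmd point at the table. */
static void model_pte_install(pmd_t *pmd, pte_t *pte)
{
	pmd->table = pte;
}

int main(void)
{
	pmd_t pmd = { .table = NULL };

	/* 1. allocate (nothing published through the pmd yet) */
	pte_t *pte = model_pte_alloc(&pmd);

	/* 2. fill every slot first ... */
	for (int i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = 0x1000UL * i;

	/* 3. ... then install, mirroring alloc_init_pte()'s new ordering */
	model_pte_install(&pmd, pte);

	printf("pmd installed, entry 1 = %#lx\n", pmd.table[1]);
	free(pte);
	return 0;
}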