[ARM] 3813/1: prevent >= 4G /dev/mem mmap()
arch/arm/mm/mmap.c
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>

#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
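
/*
 * Worked example (hypothetical values, added for this write-up,
 * assuming PAGE_SHIFT = 12 and SHMLBA = 0x4000, i.e. four pages):
 *
 *   COLOUR_ALIGN(0x8000123, 5)
 *     = ((0x8000123 + 0x3fff) & ~0x3fff) + ((5 << 12) & 0x3fff)
 *     = 0x8004000 + 0x1000
 *     = 0x8005000
 *
 * The address is rounded up to a SHMLBA boundary, then offset so that
 * its cache colour matches that of page "pgoff" within the object.
 */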

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches. We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
#ifdef CONFIG_CPU_V6
        unsigned int cache_type;
        int do_align = 0, aliasing = 0;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias. This is indicated by bits 9 and 21 of the
         * cache type register.
         */
        cache_type = read_cpuid(CPUID_CACHETYPE);
        if (cache_type != read_cpuid(CPUID_ID)) {
                aliasing = (cache_type | cache_type >> 12) & (1 << 11);
                if (aliasing)
                        do_align = filp || flags & MAP_SHARED;
        }
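        /*
         * Illustration (hypothetical register value, added for this
         * write-up): the OR folds the D-cache half of the register
         * onto the I-cache half so a single mask can test both. With,
         * say, cache_type = 0x00800000, cache_type >> 12 = 0x00000800,
         * and (0x00800000 | 0x00000800) & (1 << 11) is nonzero, so
         * aliasing is detected from the D-cache half alone.
         */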
#else
#define do_align 0
#define aliasing 0
#endif

        /*
         * We should enforce the MAP_FIXED case. However, currently
         * the generic kernel code doesn't allow us to handle this.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }
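        /*
         * Illustration (hypothetical values, assuming SHMLBA = 0x4000,
         * added for this write-up): on an aliasing cache, a
         * MAP_FIXED|MAP_SHARED request at addr = 0x40001000 has
         * addr & (SHMLBA - 1) = 0x1000 and is rejected with -EINVAL;
         * an SHMLBA-aligned request at 0x40004000 passes through
         * unchanged.
         */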

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        if (do_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
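
/*
 * Search walkthrough (hypothetical layout, added for this write-up):
 * assume TASK_UNMAPPED_BASE is 0x40000000, free_area_cache still
 * points there, and VMAs exist at 0x40000000-0x40100000 and
 * 0x40180000-0x40200000. A 1MB (0x100000) request walks past the
 * first VMA, finds the 0x80000 hole at 0x40100000 too small
 * (recording its size in cached_hole_size, so a later request that
 * fits restarts from TASK_UNMAPPED_BASE and can reuse the hole),
 * and succeeds at 0x40200000, leaving free_area_cache at 0x40300000.
 */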

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
        if (addr + size > __pa(high_memory))
                return 0;

        return 1;
}
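
/*
 * Worked example (hypothetical platform, added for this write-up):
 * with 64MB of RAM at physical address 0xa0000000, __pa(high_memory)
 * is 0xa4000000, so a read() at addr 0xa3fff000 with size 0x1000 is
 * allowed, while the same read at 0xa4000000 is rejected.
 */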

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}
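
With 4KiB pages, pfn 0x00100000 corresponds to physical address 4GiB
(0x00100000 << 12 = 0x100000000), so the check rejects any mapping that
would extend past that boundary; the highest page still allowed is pfn
0xfffff, the one ending exactly at 4GiB. Below is a minimal user-space
sketch of the enforced behaviour (an illustration, not part of the
commit; it assumes a 4KiB page size and a large-file build so that
mmap() accepts a 64-bit offset):

/* mmap_above_4g.c - hypothetical demonstration only.
 * Build with: gcc -D_FILE_OFFSET_BITS=64 -o mmap_above_4g mmap_above_4g.c
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/mem", O_RDONLY | O_SYNC);
        if (fd < 0) {
                perror("open /dev/mem");
                return 1;
        }

        /* A file offset of 4GiB is pfn 0x00100000 with 4KiB pages;
         * valid_mmap_phys_addr_range() now fails it, so the /dev/mem
         * mmap handler returns -EINVAL. */
        void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd,
                       (off_t)1 << 32);
        if (p == MAP_FAILED)
                perror("mmap at 4GiB rejected as expected");
        else
                munmap(p, 4096);

        close(fd);
        return 0;
}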