Unify sys_mmap*
arch/x86/kernel/sys_x86_64.c (mt8127/android_kernel_alcatel_ttab.git)
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

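/*
 * Editor's note (not in the original source): the 64-bit mmap entry point
 * below takes a byte offset that must be page aligned; it is converted to a
 * page offset before sys_mmap_pgoff() is called. For example, assuming
 * 4 KiB pages (PAGE_SHIFT == 12), off = 0x3000 passes the alignment check
 * and becomes pgoff = 3, while off = 0x3100 returns -EINVAL.
 */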
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, off)
{
        long error;

        error = -EINVAL;
        if (off & ~PAGE_MASK)
                goto out;

        error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
        return error;
}

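/*
 * Editor's note (not in the original source): find_start_end() picks the
 * search window for a new mapping. A 64-bit task that asks for MAP_32BIT
 * gets the 1 GB range [0x40000000, 0x80000000), with the start optionally
 * randomized by up to 32 MB when PF_RANDOMIZE is set; everything else
 * searches between TASK_UNMAPPED_BASE and TASK_SIZE.
 */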
static void find_start_end(unsigned long flags, unsigned long *begin,
                           unsigned long *end)
{
        if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
                unsigned long new_begin;
                /* This is usually needed to map code in the small
                   model, so it needs to be in the first 31 bits. Limit
                   it to that. This means we need to move the
                   unmapped base down for this case. This can give
                   conflicts with the heap, but we assume that glibc
                   malloc knows how to fall back to mmap. Give it 1GB
                   of playground for now. -AK */
                *begin = 0x40000000;
                *end = 0x80000000;
                if (current->flags & PF_RANDOMIZE) {
                        new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
                        if (new_begin)
                                *begin = new_begin;
                }
        } else {
                *begin = TASK_UNMAPPED_BASE;
                *end = TASK_SIZE;
        }
}

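/*
 * Editor's note (not in the original source): bottom-up allocator. It does
 * a first-fit walk of the VMA list starting at mm->free_area_cache, falling
 * back to a full search from the start of the window before giving up.
 * mm->cached_hole_size remembers the largest hole skipped so far so a
 * later, smaller request can reuse it.
 */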
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
        unsigned long begin, end;

        if (flags & MAP_FIXED)
                return addr;

        find_start_end(flags, &begin, &end);

        if (len > end)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
            && len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = begin;
        }
        addr = mm->free_area_cache;
        if (addr < begin)
                addr = begin;
        start_addr = addr;

full_search:
        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (end - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != begin) {
                                start_addr = addr = begin;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
        }
}

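/*
 * Editor's note (not in the original source): top-down allocator. It
 * searches downward from mm->mmap_base. MAP_32BIT requests from 64-bit
 * tasks, and requests that do not fit below mmap_base, fall back to the
 * bottom-up allocator above; the top-down cache state is restored
 * afterwards.
 */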
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                return addr;

        /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
                goto bottomup;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;

        /* make sure it can fit in the remaining address space */
        if (addr > len) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return mm->free_area_cache = addr-len;
        }

        if (mm->mmap_base < len)
                goto bottomup;

        addr = mm->mmap_base-len;

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (!vma || addr+len <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return mm->free_area_cache = addr;

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
        } while (len < vma->vm_start);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

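/*
 * Editor's note (not in the original source): 64-bit uname. It copies the
 * current utsname into the user buffer; tasks running with the PER_LINUX32
 * personality report an "i686" machine string instead of the native one.
 */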
SYSCALL_DEFINE1(uname, struct new_utsname __user *, name)
{
        int err;
        down_read(&uts_sem);
        err = copy_to_user(name, utsname(), sizeof(*name));
        up_read(&uts_sem);
        if (personality(current->personality) == PER_LINUX32)
                err |= copy_to_user(&name->machine, "i686", 5);
        return err ? -EFAULT : 0;
}