/*
 * arch/x86/kernel/sys_x86_64.c
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags,
		unsigned long fd, unsigned long off)
{
	long error;
	struct file *file;

	/* the byte offset must be page-aligned */
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = -EBADF;
	file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
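
/*
 * Illustrative userspace sketch (not part of this file), showing the
 * alignment rule enforced above: the byte offset passed to mmap(2)
 * must be a multiple of the page size, or the call fails with EINVAL.
 * Assumes an open file descriptor fd:
 *
 *	#include <sys/mman.h>
 *
 *	void *ok  = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 4096);
 *	void *bad = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 100);
 *	// bad == MAP_FAILED, errno == EINVAL: 100 is not page-aligned
 */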

static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/* This is usually needed to map code in the small
		   model, so it needs to be in the first 31 bits.  Limit
		   it to that.  This means we need to move the
		   unmapped base down for this case.  This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap.  Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}
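
/*
 * Illustrative userspace sketch (not part of this file): for a 64-bit
 * task, MAP_32BIT confines the window chosen above to
 * [0x40000000, 0x80000000), with the start randomized by up to 32MB
 * when PF_RANDOMIZE is set.
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
 *	// p (if not MAP_FAILED) lies below the 2GB boundary
 */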

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	/* try the caller's hint first */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	/* restart from the window base if the cached hole is big enough */
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
	}
}
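
/*
 * A minimal userspace model of the first-fit walk above (hypothetical
 * names, for illustration only; it omits the free_area_cache and
 * cached_hole_size bookkeeping): given disjoint mapped ranges sorted
 * by start address, slide a cursor upward past each range until a gap
 * of at least len opens before the next one.
 *
 *	struct range { unsigned long start, end; };
 *
 *	static unsigned long first_fit(const struct range *v, int n,
 *				       unsigned long begin, unsigned long end,
 *				       unsigned long len)
 *	{
 *		unsigned long addr = begin;
 *
 *		for (int i = 0; i < n; i++) {
 *			if (v[i].end <= addr)
 *				continue;		// entirely below the cursor
 *			if (addr + len <= v[i].start)
 *				return addr;		// the gap before it fits
 *			addr = v[i].end;		// skip past this range
 *		}
 *		return (end - len >= addr) ? addr : 0;	// 0: no fit
 *	}
 */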

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr - len);
		if (!vma || addr <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = addr - len;
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base - len;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = addr;

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start - len;
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
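
/*
 * The same idea as a minimal userspace model, top-down this time
 * (hypothetical names, for illustration only; assumes len <= base):
 * start just below the base and walk the sorted ranges from highest
 * to lowest, dropping the candidate window below each range that
 * overlaps it.
 *
 *	static unsigned long top_down_fit(const struct range *v, int n,
 *					  unsigned long base,
 *					  unsigned long len)
 *	{
 *		unsigned long addr = base - len;
 *
 *		for (int i = n - 1; i >= 0; i--) {
 *			if (v[i].start >= addr + len)
 *				continue;		// entirely above the window
 *			if (v[i].end <= addr)
 *				return addr;		// window clears this range
 *			if (v[i].start < len)
 *				return 0;		// no room below; 0: no fit
 *			addr = v[i].start - len;	// retry just below it
 *		}
 *		return addr;
 *	}
 */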

asmlinkage long sys_uname(struct new_utsname __user *name)
{
	int err;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof(*name));
	up_read(&uts_sem);
	/* 32-bit personalities see an "i686" machine string */
	if (personality(current->personality) == PER_LINUX32)
		err |= copy_to_user(&name->machine, "i686", 5);
	return err ? -EFAULT : 0;
}
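
/*
 * Illustrative userspace sketch (not part of this file): a process
 * that switches to the PER_LINUX32 personality sees the 32-bit
 * machine string from the branch above, even on an x86-64 kernel.
 *
 *	#include <stdio.h>
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *
 *	int main(void)
 *	{
 *		struct utsname u;
 *
 *		personality(PER_LINUX32);
 *		uname(&u);
 *		printf("%s\n", u.machine);	// prints "i686", not "x86_64"
 *		return 0;
 *	}
 */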