/* arch/x86/kernel/sys_x86_64.c */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

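/*
 * Note on the flags test in get_align_mask() below: mmap_is_ia32() returns 1
 * for a 32-bit task and 0 for a 64-bit one, so (2 - mmap_is_ia32()) selects
 * ALIGN_VA_32 or ALIGN_VA_64 respectively (assuming the usual definitions
 * ALIGN_VA_32 == BIT(0) and ALIGN_VA_64 == BIT(1) from <asm/elf.h>), letting
 * one conditional cover both cases. A negative va_align.flags means the
 * alignment workaround is not enabled for this CPU.
 */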
/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

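/*
 * Round an address up to the boundary implied by get_align_mask(); with a
 * zero mask the expression reduces to the address itself, so callers such as
 * the vDSO placement code can use it unconditionally.
 */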
unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();
	return (addr + align_mask) & ~align_mask;
}

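/*
 * Parser for the "align_va_addr=" kernel command-line option registered via
 * __setup() below; accepted values are "32", "64", "off" and "on" (e.g. boot
 * with "align_va_addr=on" to align both 32- and 64-bit mmaps). Returning 1
 * tells the early-param machinery the option was consumed.
 */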
static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);

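/*
 * Native 64-bit mmap entry point: the offset argument is in bytes and must
 * be page-aligned; it is converted to a page offset before being handed to
 * the generic sys_mmap_pgoff() helper.
 */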
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}

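/*
 * Pick the search window for a new mapping: MAP_32BIT requests from 64-bit
 * tasks are confined to [0x40000000, 0x80000000) (optionally randomized
 * within the first 32MB of that window); everything else searches from the
 * legacy mmap base up to TASK_SIZE.
 */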
static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/* This is usually used to map code in the small
		   model, so it needs to be in the first 31 bits.
		   Limit it to that. This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = current->mm->mmap_legacy_base;
		*end = TASK_SIZE;
	}
}

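/*
 * Bottom-up allocator: an explicit hint is honoured if the range is free,
 * otherwise vm_unmapped_area() searches upward inside the window chosen by
 * find_start_end(). The cache-aliasing align_mask is only applied to file
 * mappings (filp != NULL).
 */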
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = filp ? get_align_mask() : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

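/*
 * Top-down allocator used with the non-legacy mmap layout: the search runs
 * downward from mm->mmap_base. vm_unmapped_area() returns a page-aligned
 * address on success, so a result with low bits set is an error code (only
 * -ENOMEM is expected, hence the VM_BUG_ON) and triggers the bottom-up
 * fallback below.
 */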
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = filp ? get_align_mask() : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}