/* arch/x86/kernel/sys_x86_64.c */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 *
 * @flags denotes the allocation direction - bottomup or topdown -
 * or vDSO; see call sites below.
 */
unsigned long align_addr(unsigned long addr, struct file *filp,
			 enum align_flags flags)
{
	unsigned long tmp_addr;

	/* handle 32- and 64-bit case with a single conditional */
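	/*
	 * A note on the arithmetic, assuming ALIGN_VA_32 == 1 and
	 * ALIGN_VA_64 == 2 (their enum align_flags values):
	 * mmap_is_ia32() returns 1 for 32-bit tasks, so
	 * "2 - mmap_is_ia32()" selects ALIGN_VA_32 for them and
	 * ALIGN_VA_64 for 64-bit tasks.
	 */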
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return addr;

	if (!(current->flags & PF_RANDOMIZE))
		return addr;

	if (!((flags & ALIGN_VDSO) || filp))
		return addr;

	tmp_addr = addr;

	/*
	 * We need an address that is <= the original one only in the
	 * topdown case, where masking alone rounds down; when going
	 * bottomup, add the mask first so the result is rounded up to
	 * the next aligned address instead.
	 */
	if (!(flags & ALIGN_TOPDOWN))
		tmp_addr += va_align.mask;

	tmp_addr &= ~va_align.mask;

	return tmp_addr;
}
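
/*
 * A worked example with a hypothetical mask: if va_align.mask were
 * 0x7fff (32 KiB alignment), a bottomup request for 0x40001000 would
 * yield (0x40001000 + 0x7fff) & ~0x7fff == 0x40008000, while a topdown
 * request for 0x7ffff000 would yield 0x7ffff000 & ~0x7fff == 0x7fff8000.
 */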

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
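
/*
 * Kernel command-line usage, matching the parsing above:
 *
 *	align_va_addr=32	align 32-bit mappings only
 *	align_va_addr=64	align 64-bit mappings only
 *	align_va_addr=on	align both 32- and 64-bit mappings
 *	align_va_addr=off	disable the alignment
 */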

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;

	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}
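
/*
 * Note: this legacy entry point takes a byte offset, which must be
 * page-aligned (e.g. off == 4096 is accepted while off == 100 returns
 * -EINVAL) and is converted to a page offset for sys_mmap_pgoff().
 */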

static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/* This is usually used to map code in the small code
		   model, so it needs to be in the first 31 bits. Limit
		   it to that. This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
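		/*
		 * With randomization enabled, slide the base of this
		 * 1GB window up by as much as 32 MiB (0x02000000) so
		 * MAP_32BIT mappings do not always start at 0x40000000.
		 */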
		if (current->flags & PF_RANDOMIZE) {
			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
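
	/*
	 * mm->free_area_cache remembers where the previous search left
	 * off and mm->cached_hole_size tracks the largest hole seen
	 * below it; if the cached hole is already too small for this
	 * request, restart the scan from the beginning of the range.
	 */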
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:

	addr = align_addr(addr, filp, 0);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		addr = align_addr(addr, filp, 0);
	}
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/*
	 * Make sure it can fit in the remaining address space; addr > len
	 * also guarantees that the addr - len below cannot wrap past zero.
	 */
	if (addr > len) {
		unsigned long tmp_addr = align_addr(addr - len, filp,
						    ALIGN_TOPDOWN);

		vma = find_vma(mm, tmp_addr);
		if (!vma || tmp_addr + len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = tmp_addr;
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base - len;

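	/*
	 * Walk downwards from mmap_base, trying to place the mapping
	 * just below each vma that is in the way; stop once there is
	 * no room for len below the lowest vma.
	 */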
	do {
		addr = align_addr(addr, filp, ALIGN_TOPDOWN);

		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = addr;

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start - len;
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}