License cleanup: add SPDX GPL-2.0 license identifier to files with no license
[MotorolaMobilityLLC/kernel-slsi.git] arch/x86/kernel/sys_x86_64.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/compat.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/mpx.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
        /* handle the 32- and 64-bit cases with a single conditional */
        if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
                return 0;

        if (!(current->flags & PF_RANDOMIZE))
                return 0;

        return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
        return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
        unsigned long align_mask = get_align_mask();

        addr = (addr + align_mask) & ~align_mask;
        return addr | get_align_bits();
}

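/*
 * Worked example with illustrative constants (not taken from the CPU
 * setup code): suppose get_align_mask() returns 0x7000 (32 KiB
 * alignment of page-aligned addresses) and the per-boot random bits
 * from get_align_bits() are 0x3000. Then align_vdso_addr(0x1234000)
 * rounds up to 0x1238000 and ORs in the random bits, returning
 * 0x123b000. Every address produced this way shares the same bits
 * [12:15), which is what avoids the F15h I$ aliasing.
 */
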
static int __init control_va_addr_alignment(char *str)
{
        /* guard against enabling this on other CPU families */
        if (va_align.flags < 0)
                return 1;

        if (*str == 0)
                return 1;

        if (*str == '=')
                str++;

        if (!strcmp(str, "32"))
                va_align.flags = ALIGN_VA_32;
        else if (!strcmp(str, "64"))
                va_align.flags = ALIGN_VA_64;
        else if (!strcmp(str, "off"))
                va_align.flags = 0;
        else if (!strcmp(str, "on"))
                va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
        else
                return 0;

        return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
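
/*
 * Accepted kernel command-line forms, matching the strcmp() chain
 * above:
 *
 *   align_va_addr=32    align 32-bit mappings only
 *   align_va_addr=64    align 64-bit mappings only
 *   align_va_addr=on    align both 32- and 64-bit mappings
 *   align_va_addr=off   disable the extra alignment
 */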

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, off)
{
        long error;
        error = -EINVAL;
        if (off & ~PAGE_MASK)
                goto out;

        error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
        return error;
}
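
/*
 * Userspace sketch of the EINVAL path above (fd and the literal
 * arguments are hypothetical): the offset must be page aligned,
 * because only off >> PAGE_SHIFT survives into sys_mmap_pgoff().
 *
 *   mmap(NULL, 8192, PROT_READ, MAP_PRIVATE, fd, 4096); // ok
 *   mmap(NULL, 8192, PROT_READ, MAP_PRIVATE, fd, 123);  // EINVAL
 */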

static void find_start_end(unsigned long addr, unsigned long flags,
                unsigned long *begin, unsigned long *end)
{
        if (!in_compat_syscall() && (flags & MAP_32BIT)) {
                /* This is usually used to map code in the small
                   model, so it needs to be in the first 31 bits.
                   Limit it to that. This means we need to move the
                   unmapped base down for this case. This can give
                   conflicts with the heap, but we assume that glibc
                   malloc knows how to fall back to mmap. Give it 1GB
                   of playground for now. -AK */
                *begin = 0x40000000;
                *end = 0x80000000;
                if (current->flags & PF_RANDOMIZE) {
                        *begin = randomize_page(*begin, 0x02000000);
                }
                return;
        }

        *begin = get_mmap_base(1);
        if (in_compat_syscall())
                *end = task_size_32bit();
        else
                *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
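
/*
 * Example of the MAP_32BIT branch above: a 64-bit task passing
 * MAP_32BIT searches only [0x40000000, 0x80000000), and with
 * PF_RANDOMIZE set the lower bound is shifted up by a random,
 * page-aligned amount below 32 MiB (0x02000000).
 */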

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
        unsigned long begin, end;

        addr = mpx_unmapped_area_check(addr, len, flags);
        if (IS_ERR_VALUE(addr))
                return addr;

        if (flags & MAP_FIXED)
                return addr;

        find_start_end(addr, flags, &begin, &end);

        if (len > end)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = begin;
        info.high_limit = end;
        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        return vm_unmapped_area(&info);
}
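
/*
 * For file-backed mappings, get_align_mask() and get_align_bits() are
 * folded into the search via info.align_mask/info.align_offset, so
 * vm_unmapped_area() hands back addresses that already satisfy the
 * F15h I$ anti-aliasing constraint; anonymous mappings skip the extra
 * alignment.
 */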

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                const unsigned long len, const unsigned long pgoff,
                const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        addr = mpx_unmapped_area_check(addr, len, flags);
        if (IS_ERR_VALUE(addr))
                return addr;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                return addr;

        /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!in_compat_syscall() && (flags & MAP_32BIT))
                goto bottomup;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = get_mmap_base(0);

        /*
         * If the hint address is above DEFAULT_MAP_WINDOW, look for an
         * unmapped area in the full address space.
         *
         * The !in_compat_syscall() check avoids handing out high
         * addresses to x32 tasks.
         */
        if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
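        /*
         * vm_unmapped_area() returns a page-aligned address on success
         * or a negative errno, so any bits set inside ~PAGE_MASK in
         * the return value indicate a failure.
         */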
        addr = vm_unmapped_area(&info);
        if (!(addr & ~PAGE_MASK))
                return addr;
        VM_BUG_ON(addr != -ENOMEM);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}