#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
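
/*
 * Illustrative usage sketch (not built, hence the #if 0): the common
 * pattern of taking a private copy of a caller-supplied name. struct
 * my_device and my_device_set_name() are hypothetical, for illustration
 * only.
 */
#if 0
static int my_device_set_name(struct my_device *dev, const char *name)
{
	char *copy = kstrdup(name, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;		/* kstrdup() returns NULL on failure */
	kfree(dev->name);		/* drop any previous copy */
	dev->name = copy;
	return 0;
}
#endif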

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
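
/*
 * Illustrative usage sketch (not built): duplicating a caller-supplied
 * binary blob so it can outlive the caller's buffer. struct my_fw and
 * my_fw_save_config() are hypothetical.
 */
#if 0
static int my_fw_save_config(struct my_fw *fw, const void *cfg, size_t len)
{
	void *copy = kmemdup(cfg, len, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;
	fw->cfg = copy;
	fw->cfg_len = len;
	return 0;
}
#endif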

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
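
/*
 * Illustrative usage sketch (not built): copying a fixed-size argument
 * block from user space in an ioctl handler. Note that memdup_user()
 * returns an ERR_PTR(), never NULL, on failure. struct my_args and
 * my_ioctl_set() are hypothetical.
 */
#if 0
static long my_ioctl_set(void __user *uarg)
{
	struct my_args *args = memdup_user(uarg, sizeof(*args));

	if (IS_ERR(args))
		return PTR_ERR(args);	/* -ENOMEM or -EFAULT */
	/* ... validate and apply *args ... */
	kfree(args);
	return 0;
}
#endif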

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this when you cannot free the buffer immediately,
 * for example because readers may still be accessing it under RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);
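
/*
 * Illustrative usage sketch (not built): growing an RCU-visible buffer.
 * Because __krealloc() never frees the old buffer, concurrent readers can
 * keep using it until a grace period has elapsed. my_table, my_lock and
 * my_table_grow() are hypothetical; the update side is assumed to hold
 * my_lock.
 */
#if 0
static int my_table_grow(size_t new_size)
{
	struct my_entry *old = rcu_dereference_protected(my_table,
						lockdep_is_held(&my_lock));
	struct my_entry *new = __krealloc(old, new_size, GFP_KERNEL);

	if (!new)
		return -ENOMEM;
	if (new != old) {
		rcu_assign_pointer(my_table, new);
		synchronize_rcu();	/* wait out readers of @old */
		kfree(old);		/* __krealloc() did not free it */
	}
	return 0;
}
#endif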

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
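
/*
 * Illustrative usage sketch (not built): growing a dynamic buffer while
 * keeping the old data valid on allocation failure. struct my_buf and
 * my_buf_append() are hypothetical.
 */
#if 0
static int my_buf_append(struct my_buf *b, const void *data, size_t len)
{
	if (b->used + len > b->cap) {
		size_t new_cap = max(2 * b->cap, b->used + len);
		char *tmp = krealloc(b->data, new_cap, GFP_KERNEL);

		if (!tmp)
			return -ENOMEM;	/* b->data is still valid, unchanged */
		b->data = tmp;
		b->cap = new_cap;
	}
	memcpy(b->data + b->used, data, len);
	b->used += len;
	return 0;
}
#endif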

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer, which can be a
 * good deal bigger than the requested buffer size passed to kmalloc(). So
 * be careful when using this function in performance-sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
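
/*
 * Illustrative usage sketch (not built): releasing key material. Using
 * kzfree() instead of kfree() ensures the secret does not linger in freed
 * memory. struct my_session is hypothetical.
 */
#if 0
static void my_session_destroy(struct my_session *s)
{
	kzfree(s->key);	/* zeroed before being returned to the allocator */
	kfree(s);
}
#endif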

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
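
/*
 * Illustrative usage sketch (not built): copying a NUL-terminated string
 * from user space with a length cap. MY_LABEL_MAX and my_set_label() are
 * hypothetical.
 */
#if 0
static long my_set_label(const char __user *ulabel)
{
	char *label = strndup_user(ulabel, MY_LABEL_MAX);

	if (IS_ERR(label))
		return PTR_ERR(label);	/* -EFAULT, -EINVAL or -ENOMEM */
	/* ... store or act on label ... */
	kfree(label);
	return 0;
}
#endif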

/*
 * Insert @vma into @mm's VMA list after @prev, or at the head of the list
 * when @prev is NULL; in that case @rb_parent identifies the successor.
 */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If in_group is non-zero, check the entire thread group; otherwise
 * just check the current task. Returns the pid of the task that
 * the vma is a stack for, or 0 if it is not a stack.
 */
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;

		rcu_read_lock();
		for_each_thread(task, t) {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		}
done:
		rcu_read_unlock();
	}

	return ret;
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with
 * no pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
						int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to consider carefully what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
					      int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
			     write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
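
/*
 * Illustrative usage sketch (not built): pinning a user buffer for I/O and
 * releasing the page references afterwards. my_pin_and_process() and the
 * processing step are hypothetical.
 */
#if 0
static int my_pin_and_process(unsigned long uaddr, int nr_pages,
			      struct page **pages)
{
	int i, got;

	got = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
	if (got < 0)
		return got;		/* no pages were pinned */
	/* ... operate on pages[0 .. got-1] ... */
	for (i = 0; i < got; i++)
		put_page(pages[i]);	/* drop the references taken above */
	return got == nr_pages ? 0 : -EFAULT;
}
#endif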

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
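
/*
 * Illustrative usage sketch (not built): mapping a file region from kernel
 * code. The return value encodes a negative errno on failure, so it is
 * checked with IS_ERR_VALUE(). my_map_file() is hypothetical.
 */
#if 0
static unsigned long my_map_file(struct file *file, unsigned long len,
				 unsigned long offset)
{
	unsigned long addr;

	addr = vm_mmap(file, 0, len, PROT_READ, MAP_PRIVATE, offset);
	if (IS_ERR_VALUE(addr))
		pr_err("vm_mmap failed: %ld\n", (long)addr);
	return addr;
}
#endif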

/*
 * Return the address_space that @page belongs to: the swap address space
 * for swap-cache pages, NULL for anonymous pages, and page->mapping
 * otherwise.
 */
struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
#ifdef CONFIG_SWAP
	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		mapping = swap_address_space(entry);
	} else
#endif
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

/* Tracepoint definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);