HWPOISON: Add madvise() based injector for hardware poisoned pages v4
/*
 * linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
        switch (behavior) {
        case MADV_REMOVE:
        case MADV_WILLNEED:
        case MADV_DONTNEED:
                return 0;
        default:
                /* be safe, default to 1. list exceptions explicitly */
                return 1;
        }
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end, int behavior)
{
        struct mm_struct *mm = vma->vm_mm;
        int error = 0;
        pgoff_t pgoff;
        unsigned long new_flags = vma->vm_flags;

        switch (behavior) {
        case MADV_NORMAL:
                new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
                break;
        case MADV_SEQUENTIAL:
                new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
                break;
        case MADV_RANDOM:
                new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
                break;
        case MADV_DONTFORK:
                new_flags |= VM_DONTCOPY;
                break;
        case MADV_DOFORK:
                new_flags &= ~VM_DONTCOPY;
                break;
        }

        if (new_flags == vma->vm_flags) {
                *prev = vma;
                goto out;
        }

        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma));
        if (*prev) {
                vma = *prev;
                goto success;
        }

        *prev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto out;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto out;
        }

success:
        /*
         * vm_flags is protected by the mmap_sem held in write mode.
         */
        vma->vm_flags = new_flags;

out:
        if (error == -ENOMEM)
                error = -EAGAIN;
        return error;
}
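
/*
 * Illustrative userspace sketch (not part of this file): a caller sets
 * MADV_SEQUENTIAL on a file mapping, which reaches madvise_behavior()
 * above and sets VM_SEQ_READ on the vma. The file name and mapping size
 * below are arbitrary assumptions.
 *
 *      #include <fcntl.h>
 *      #include <sys/mman.h>
 *
 *      int fd = open("/var/log/big.log", O_RDONLY);    // hypothetical file
 *      size_t len = 256 * 4096;                        // assumed 1 MiB, page-aligned
 *      void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *      if (p != MAP_FAILED)
 *              madvise(p, len, MADV_SEQUENTIAL);       // hint: one sequential pass
 */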

/*
 * Schedule all required I/O operations. Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end)
{
        struct file *file = vma->vm_file;

        if (!file)
                return -EBADF;

        if (file->f_mapping->a_ops->get_xip_mem) {
                /* no bad return value, but ignore advice */
                return 0;
        }

        *prev = vma;
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

        force_page_cache_readahead(file->f_mapping, file, start, end - start);
        return 0;
}
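
/*
 * Illustrative userspace sketch (not part of this file): MADV_WILLNEED
 * asks madvise_willneed() above to kick off readahead for a range the
 * process is about to touch. It assumes the mapping p from the previous
 * sketch; the offset and length are arbitrary.
 *
 *      // prefetch the second 256 KiB of the mapping
 *      madvise((char *)p + 256 * 1024, 256 * 1024, MADV_WILLNEED);
 *      // ... do other work while the pages stream in ...
 */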

/*
 * Application no longer needs these pages. If the pages are dirty,
 * it's OK to just throw them away. The app will be more careful about
 * data it wants to keep. Be sure to free swap resources too. The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do. This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them. There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end)
{
        *prev = vma;
        if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
                return -EINVAL;

        if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
                struct zap_details details = {
                        .nonlinear_vma = vma,
                        .last_index = ULONG_MAX,
                };
                zap_page_range(vma, start, end - start, &details);
        } else
                zap_page_range(vma, start, end - start, NULL);
        return 0;
}
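
/*
 * Illustrative userspace sketch (not part of this file): discarding an
 * anonymous scratch buffer with MADV_DONTNEED. The pages are zapped by
 * madvise_dontneed() above; touching them again yields fresh zero-filled
 * pages. The buffer size is an arbitrary assumption.
 *
 *      #include <sys/mman.h>
 *
 *      size_t len = 16 * 4096;
 *      char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      buf[0] = 1;                             // populate a page
 *      madvise(buf, len, MADV_DONTNEED);       // drop contents, keep mapping
 *      // buf[0] now reads back as 0
 */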

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
                           struct vm_area_struct **prev,
                           unsigned long start, unsigned long end)
{
        struct address_space *mapping;
        loff_t offset, endoff;
        int error;

        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */

        if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
                return -EINVAL;

        if (!vma->vm_file || !vma->vm_file->f_mapping
                || !vma->vm_file->f_mapping->host) {
                return -EINVAL;
        }

        if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
                return -EACCES;

        mapping = vma->vm_file->f_mapping;

        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
        endoff = (loff_t)(end - vma->vm_start - 1)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        /* vmtruncate_range needs to take i_mutex and i_alloc_sem */
        up_read(&current->mm->mmap_sem);
        error = vmtruncate_range(mapping->host, offset, endoff);
        down_read(&current->mm->mmap_sem);
        return error;
}
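
/*
 * Illustrative userspace sketch (not part of this file): punching a hole
 * in a tmpfs file through a shared writable mapping. The path and sizes
 * are assumptions; on unsupported filesystems this returns -ENOSYS.
 *
 *      #include <fcntl.h>
 *      #include <sys/mman.h>
 *      #include <unistd.h>
 *
 *      int fd = open("/dev/shm/scratch", O_RDWR | O_CREAT, 0600);
 *      ftruncate(fd, 3 * 4096);
 *      char *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 *      // free the middle page and its backing store
 *      madvise(p + 4096, 4096, MADV_REMOVE);
 */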

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(unsigned long start, unsigned long end)
{
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        for (; start < end; start += PAGE_SIZE) {
                struct page *p;
                int ret = get_user_pages(current, current->mm, start, 1,
                                         0, 0, &p, NULL);
                if (ret != 1)
                        return ret;
                printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
                       page_to_pfn(p), start);
                /* Ignore return value for now */
                __memory_failure(page_to_pfn(p), 0, 1);
                put_page(p);
        }
        return 0;
}
#endif
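
/*
 * Illustrative userspace sketch (not part of this file): a test program
 * injecting a memory failure on one of its own pages via the new
 * MADV_HWPOISON advice. Requires CAP_SYS_ADMIN and a kernel built with
 * CONFIG_MEMORY_FAILURE; a later access to the poisoned page is expected
 * to kill the process with SIGBUS.
 *
 *      #include <stdio.h>
 *      #include <sys/mman.h>
 *
 *      char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      p[0] = 1;                               // make sure the page exists
 *      if (madvise(p, 4096, MADV_HWPOISON) < 0)
 *              perror("madvise(MADV_HWPOISON)");
 */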

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
            unsigned long start, unsigned long end, int behavior)
{
        long error;

        switch (behavior) {
        case MADV_DOFORK:
                if (vma->vm_flags & VM_IO) {
                        error = -EINVAL;
                        break;
                }
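                /* fall through: handled like the other behavior advices */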
        case MADV_DONTFORK:
        case MADV_NORMAL:
        case MADV_SEQUENTIAL:
        case MADV_RANDOM:
                error = madvise_behavior(vma, prev, start, end, behavior);
                break;
        case MADV_REMOVE:
                error = madvise_remove(vma, prev, start, end);
                break;

        case MADV_WILLNEED:
                error = madvise_willneed(vma, prev, start, end);
                break;

        case MADV_DONTNEED:
                error = madvise_dontneed(vma, prev, start, end);
                break;

        default:
                BUG();
                break;
        }
        return error;
}

static int
madvise_behavior_valid(int behavior)
{
        switch (behavior) {
        case MADV_DOFORK:
        case MADV_DONTFORK:
        case MADV_NORMAL:
        case MADV_SEQUENTIAL:
        case MADV_RANDOM:
        case MADV_REMOVE:
        case MADV_WILLNEED:
        case MADV_DONTNEED:
                return 1;

        default:
                return 0;
        }
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area. The idea is to help the kernel
 * use appropriate read-ahead and caching techniques. The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters. This
 *              results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *              on any access, since it is unlikely that the appli-
 *              cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *              once, so they can be aggressively read ahead, and
 *              can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *              some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *              so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *              pages and associated backing store.
 *  MADV_HWPOISON - trigger memory error handling on the pages in the
 *              range, for testing (requires CAP_SYS_ADMIN and
 *              CONFIG_MEMORY_FAILURE).
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *              "behavior" is not a valid value, or application
 *              is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *              mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
        unsigned long end, tmp;
        struct vm_area_struct *vma, *prev;
        int unmapped_error = 0;
        int error = -EINVAL;
        int write;
        size_t len;

#ifdef CONFIG_MEMORY_FAILURE
        if (behavior == MADV_HWPOISON)
                return madvise_hwpoison(start, start + len_in);
#endif
        if (!madvise_behavior_valid(behavior))
                return error;

        write = madvise_need_mmap_write(behavior);
        if (write)
                down_write(&current->mm->mmap_sem);
        else
                down_read(&current->mm->mmap_sem);

        if (start & ~PAGE_MASK)
                goto out;
        len = (len_in + ~PAGE_MASK) & PAGE_MASK;

        /* Check to see whether len was rounded up from small -ve to zero */
        if (len_in && !len)
                goto out;

        end = start + len;
        if (end < start)
                goto out;

        error = 0;
        if (end == start)
                goto out;

        /*
         * If the interval [start,end) covers some unmapped address
         * ranges, just ignore them, but return -ENOMEM at the end.
         * - different from the way of handling in mlock etc.
         */
        vma = find_vma_prev(current->mm, start, &prev);
        if (vma && start > vma->vm_start)
                prev = vma;

        for (;;) {
                /* Still start < end. */
                error = -ENOMEM;
                if (!vma)
                        goto out;

                /* Here start < (end|vma->vm_end). */
                if (start < vma->vm_start) {
                        unmapped_error = -ENOMEM;
                        start = vma->vm_start;
                        if (start >= end)
                                goto out;
                }

                /* Here vma->vm_start <= start < (end|vma->vm_end) */
                tmp = vma->vm_end;
                if (end < tmp)
                        tmp = end;

                /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
                error = madvise_vma(vma, &prev, start, tmp, behavior);
                if (error)
                        goto out;
                start = tmp;
                if (prev && start < prev->vm_end)
                        start = prev->vm_end;
                error = unmapped_error;
                if (start >= end)
                        goto out;
                if (prev)
                        vma = prev->vm_next;
                else    /* madvise_remove dropped mmap_sem */
                        vma = find_vma(current->mm, start);
        }
out:
        if (write)
                up_write(&current->mm->mmap_sem);
        else
                up_read(&current->mm->mmap_sem);

        return error;
}
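
/*
 * Illustrative userspace sketch (not part of this file): the loop above
 * keeps advising across vmas but remembers holes, so advice over a range
 * containing unmapped pages is applied where possible yet still reported
 * as -ENOMEM. Sizes below are assumptions.
 *
 *      #include <errno.h>
 *      #include <sys/mman.h>
 *
 *      char *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      munmap(p + 4096, 4096);                 // punch a hole in the middle
 *      int r = madvise(p, 3 * 4096, MADV_DONTNEED);
 *      // r == -1 with errno == ENOMEM, yet the two mapped pages were zapped
 */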