Fix "fs: convert core functions to zero_user_page"
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] / mm / filemap_xip.c
/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <asm/tlbflush.h>
#include "filemap.h"
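
/*
 * Overview (descriptive note, inferred from the code below): execute
 * in place (XIP) bypasses the page cache entirely.  The backing store
 * (e.g. directly addressable flash, or an s390 DCSS) supplies pages
 * via mapping->a_ops->get_xip_page(mapping, sector, create), which
 * returns the struct page backing the on-media block.  Throughout
 * this file the sector argument is derived from a page index as
 * index * (PAGE_SIZE/512), i.e. it is expressed in 512-byte units.
 */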
/*
 * We use our own zero page here rather than ZERO_PAGE() to avoid
 * interfering with its other users, such as /dev/zero.
 */
static struct page *__xip_sparse_page;

static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		unsigned long zeroes = get_zeroed_page(GFP_HIGHUSER);
		if (zeroes) {
			static DEFINE_SPINLOCK(xip_alloc_lock);
			spin_lock(&xip_alloc_lock);
			if (!__xip_sparse_page)
				__xip_sparse_page = virt_to_page(zeroes);
			else
				free_page(zeroes);
			spin_unlock(&xip_alloc_lock);
		}
	}
	return __xip_sparse_page;
}
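
/*
 * Note on the allocation above: the sparse page is allocated lazily
 * on first use.  The allocation itself happens outside xip_alloc_lock,
 * so two callers may race; the re-check of __xip_sparse_page under the
 * spinlock ensures only one page is ever installed, and the loser
 * frees its copy.  Once set, __xip_sparse_page is never freed.
 */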
/*
 * This is the file read routine for execute-in-place files. It uses
 * the mapping->a_ops->get_xip_page() function for the actual
 * low-level access.
 *
 * Note that the struct file * is not used at all; it may be NULL.
 */
static void
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    loff_t *ppos,
		    read_descriptor_t *desc,
		    read_actor_t actor)
{
	struct inode *inode = mapping->host;
	unsigned long index, end_index, offset;
	loff_t isize;

	BUG_ON(!mapping->a_ops->get_xip_page);

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;

		page = mapping->a_ops->get_xip_page(mapping,
			index*(PAGE_SIZE/512), 0);
		if (!page)
			goto no_xip_page;
		if (unlikely(IS_ERR(page))) {
			if (PTR_ERR(page) == -ENODATA) {
				/* sparse */
				page = ZERO_PAGE(0);
			} else {
				desc->error = PTR_ERR(page);
				goto out;
			}
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * Ok, we have the page, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used.
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		if (ret == nr && desc->count)
			continue;
		goto out;

no_xip_page:
		/* Did not get the page. Report it */
		desc->error = -EIO;
		goto out;
	}

out:
	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (filp)
		file_accessed(filp);
}
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	read_descriptor_t desc;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	desc.written = 0;
	desc.arg.buf = buf;
	desc.count = len;
	desc.error = 0;

	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    ppos, &desc, file_read_actor);

	if (desc.written)
		return desc.written;
	else
		return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_read);
ssize_t
xip_file_sendfile(struct file *in_file, loff_t *ppos,
		  size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_xip_mapping_read(in_file->f_mapping, &in_file->f_ra, in_file,
			    ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_sendfile);
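
/*
 * Both xip_file_read() and xip_file_sendfile() are thin wrappers: each
 * fills in a read_descriptor_t and lets do_xip_mapping_read() drive
 * the copy loop.  The only difference is the actor: file_read_actor
 * copies into a user buffer, while sendfile passes a caller-supplied
 * actor that feeds each page to its target (e.g. a socket).
 */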
/*
 * __xip_unmap is invoked from xip_file_nopage() and __xip_file_write().
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;

	page = __xip_sparse_page;
	if (!page)
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page, vma);
			dec_mm_counter(mm, file_rss);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}
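
/*
 * The BUG_ON(pte_dirty(pteval)) above relies on the sparse zero page
 * never being written through: a write fault on a shared mapping
 * allocates a real block in xip_file_nopage(), and a write to a
 * private mapping is COWed by the generic fault code, so the pte for
 * __xip_sparse_page itself can never become dirty.
 */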
/*
 * xip_file_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_nopage, but used for execute
 * in place.
 */
static struct page *
xip_file_nopage(struct vm_area_struct *area,
		unsigned long address,
		int *type)
{
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size, pgoff, endoff;

	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
		+ area->vm_pgoff;
	endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
		+ area->vm_pgoff;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff >= size)
		return NOPAGE_SIGBUS;

	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
	if (!IS_ERR(page))
		goto out;
	if (PTR_ERR(page) != -ENODATA)
		return NOPAGE_SIGBUS;

	/* sparse block */
	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (area->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		/* maybe shared writable, allocate new block */
		page = mapping->a_ops->get_xip_page(mapping,
			pgoff*(PAGE_SIZE/512), 1);
		if (IS_ERR(page))
			return NOPAGE_SIGBUS;
		/* unmap page at pgoff from all other vmas */
		__xip_unmap(mapping, pgoff);
	} else {
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			return NOPAGE_OOM;
	}

out:
	page_cache_get(page);
	return page;
}
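
/*
 * Summary of the fault paths above: a backed page is returned
 * directly; a hole in a shared-writable mapping gets a freshly
 * allocated block (after evicting the zero page from all other vmas
 * via __xip_unmap()); any other hole is satisfied by the shared
 * __xip_sparse_page.
 */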
static struct vm_operations_struct xip_file_vm_ops = {
	.nopage = xip_file_nopage,
};
int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_page);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
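
/*
 * An XIP-capable filesystem wires these exported helpers into its
 * file_operations.  This is an illustrative sketch only; the names
 * are hypothetical and merely show how the exports above fit
 * together:
 *
 *	const struct file_operations example_xip_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= xip_file_read,
 *		.write		= xip_file_write,
 *		.mmap		= xip_file_mmap,
 *		.sendfile	= xip_file_sendfile,
 *	};
 *
 * The corresponding address_space_operations must provide the
 * get_xip_page() callback that every entry point here BUG_ON()s when
 * missing.
 */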
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	struct page *page;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_page);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		fault_in_pages_readable(buf, bytes);

		page = a_ops->get_xip_page(mapping,
					   index*(PAGE_SIZE/512), 0);
		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
			/* sparse block: allocate a new block, then unmap
			 * the sparse page from all other vmas */
			page = a_ops->get_xip_page(mapping,
						   index*(PAGE_SIZE/512), 1);
			if (!IS_ERR(page))
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (IS_ERR(page)) {
			status = PTR_ERR(page);
			break;
		}

		copied = filemap_copy_from_user(page, offset, buf, bytes);
		flush_dcache_page(page);
		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}
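
/*
 * Ordering note for the write path above: when a write hits a hole,
 * the new block is allocated and the zero page evicted via
 * __xip_unmap() *before* data is copied in, so that no mapping can
 * still see stale zeros at that offset once the copy has landed;
 * concurrent faulters will then find the freshly allocated block.
 */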
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = remove_suid(filp->f_path.dentry);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * Truncate a page used for execute in place.
 * Analogous to block_truncate_page(), but uses get_xip_page() to
 * obtain the page instead of going through the page cache.
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	struct page *page;

	BUG_ON(!mapping->a_ops->get_xip_page);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	page = mapping->a_ops->get_xip_page(mapping,
					    index*(PAGE_SIZE/512), 0);
	if (!page)
		return -ENOMEM;
	if (unlikely(IS_ERR(page))) {
		if (PTR_ERR(page) == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return PTR_ERR(page);
	}
	zero_user_page(page, offset, length, KM_USER0);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
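
/*
 * zero_user_page() is the helper this file was converted to use (see
 * the commit subject above).  A minimal sketch of what it does,
 * assuming the era's kmap_atomic() interface that still took a KM
 * slot:
 *
 *	void *kaddr = kmap_atomic(page, KM_USER0);
 *	memset(kaddr + offset, 0, length);
 *	flush_dcache_page(page);
 *	kunmap_atomic(kaddr, KM_USER0);
 *
 * i.e. it replaces the open-coded kmap/memset/kunmap sequence that
 * callers such as xip_truncate_page() carried previously.
 */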