/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

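/*
 * Editorial sketch (not from the original file): one hedged example of how
 * a filesystem might wire up the ->invalidatepage hook dispatched above.
 * Buffer-head based filesystems can point it at block_invalidatepage();
 * leaving it NULL reaches the same fallback via the CONFIG_BLOCK branch.
 * "example_aops" is a hypothetical name.
 */
#if 0
static const struct address_space_operations example_aops = {
	.invalidatepage	= block_invalidatepage,
};
#endif
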
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	cleancache_invalidate_page(page->mapping, page);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all.  However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t index;
	pgoff_t end;
	int i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

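/*
 * Editorial sketch (not from the original file): a hedged example of
 * calling truncate_inode_pages_range() directly. lend must be one byte
 * short of a page boundary (or -1), per the BUG_ON above; lstart may be
 * unaligned, in which case the straddling page is partially zeroed. The
 * offsets and helper name below are hypothetical, assuming 4k pages.
 */
#if 0
static void example_truncate_second_page(struct inode *inode)
{
	/* Remove the cached bytes [4096, 8191]: exactly one page. */
	truncate_inode_pages_range(inode->i_mapping, 4096, 8191);
}
#endif
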
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

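/*
 * Editorial sketch (not from the original file): truncate_inode_pages()
 * is typically called from a filesystem's ->evict_inode() to drop the
 * whole page cache of a dying inode. "example_evict_inode" is a
 * hypothetical name; the surrounding teardown varies by kernel version
 * and filesystem.
 */
#if 0
static void example_evict_inode(struct inode *inode)
{
	/* Drop every cached page, starting from offset 0. */
	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);	/* kernel-version-specific teardown */
}
#endif
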
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	/*
	 * Note: this function may get called on a shmem/tmpfs mapping:
	 * pagevec_lookup() might then return 0 prematurely (because it
	 * got a gangful of swap entries); but it's hardly worth worrying
	 * about - it can rarely have anything to free from such a mapping
	 * (most pages are dirty), and already skips over any difficulties.
	 */

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

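/*
 * Editorial sketch (not from the original file): a hedged example in the
 * style of the POSIX_FADV_DONTNEED path, converting byte offsets to page
 * indices before dropping clean, unmapped pages. "example_drop_range" is
 * a hypothetical helper.
 */
#if 0
static unsigned long example_drop_range(struct address_space *mapping,
					loff_t offset, loff_t endbyte)
{
	pgoff_t start_index = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end_index = endbyte >> PAGE_CACHE_SHIFT;

	if (end_index < start_index)
		return 0;
	/* Returns how many pages were actually invalidated. */
	return invalidate_mapping_pages(mapping, start_index, end_index);
}
#endif
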
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

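/*
 * Editorial sketch (not from the original file): direct-IO style users
 * invalidate the cached range that a direct write just bypassed, so later
 * buffered reads cannot see stale data. The helper and its error handling
 * are hypothetical; real callers may retry or ignore -EBUSY.
 */
#if 0
static int example_dio_invalidate(struct address_space *mapping,
				  loff_t pos, size_t count)
{
	return invalidate_inode_pages2_range(mapping,
			pos >> PAGE_CACHE_SHIFT,
			(pos + count - 1) >> PAGE_CACHE_SHIFT);
}
#endif
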
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @oldsize: old file size
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t oldsize, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode->i_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, oldsize, newsize);
}
EXPORT_SYMBOL(truncate_setsize);

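/*
 * Editorial sketch (not from the original file): a hedged example of the
 * ->setattr() pattern the comment above describes, pairing
 * truncate_setsize() with the filesystem's own on-disk block freeing.
 * "example_setattr" and "example_truncate_blocks" are hypothetical.
 */
#if 0
static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != inode->i_size) {
		truncate_setsize(inode, attr->ia_size);
		example_truncate_blocks(inode);	/* free on-disk blocks */
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
#endif
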
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way the filesystem can be sure that page_mkwrite() is
 * called on the page before user writes to the page via mmap after the
 * i_size has been changed.
 *
 * The function must be called after i_size is updated so that a page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe the new
 * i_size value before we are prepared to store mmap writes at the new size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = 1 << inode->i_blkbits;
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_CACHE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
		return;

	index = from >> PAGE_CACHE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);

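/*
 * Editorial worked example (hypothetical numbers, not from the original
 * file): with 1k blocks and 4k pages, extending i_size from 5000 (@from)
 * to 9000 (@to) gives rounded_from = round_up(5000, 1024) = 5120, and
 * 5120 & 4095 == 1024, so the partial block sits inside page index
 * 5000 >> 12 == 1 (bytes 4096-8191). That page straddles the old size and
 * is write-protected via page_mkclean() above, forcing page_mkwrite()
 * before the next mmap write to it.
 */
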
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first; and truncate_inode_pages_range
	 * currently BUGs if lend is not pagealigned-1 (it handles partial
	 * page at start of hole, but not partial page at end of hole).  Note
	 * unmap_mapping_range allows holelen 0 for all, and we allow lend -1.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
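
/*
 * Editorial sketch (not from the original file): a hedged example of the
 * hole-punch pattern the kerneldoc above describes, as one step of a
 * fallocate(FALLOC_FL_PUNCH_HOLE) implementation: drop the cached range
 * first, then free the underlying blocks. "example_punch_hole" and
 * "example_free_blocks" are hypothetical.
 */
#if 0
static int example_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	/* lend is the last byte of the hole, hence offset + len - 1. */
	truncate_pagecache_range(inode, offset, offset + len - 1);
	return example_free_blocks(inode, offset, len);
}
#endif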