/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
/*
 * Prime number of hash buckets since address is used as the key.
 */
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
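/*
 * Waiters hash on the inode address: waiters for a given inode sleep on
 * xfs_ioend_wq[(unsigned long)ip % NVSYNC].  A collision between two
 * inodes only costs a spurious wakeup, so a small prime number of
 * buckets is sufficient.
 */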
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));

	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
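/*
 * i_iocount counts in-flight ioends against the inode: the wait above
 * sleeps until it reaches zero, and the decrement-and-test wakes those
 * waiters when the last outstanding ioend completes.
 */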
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
#if defined(XFS_RW_TRACE)
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;

	return mp->m_ddev_targp->bt_bdev;
}
/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	/*
	 * Volume managers supporting multiple paths can send back ENODEV
	 * when the final path disappears.  In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	mempool_free(ioend, xfs_ioend_pool);
/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof i_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	bsize = ioend->io_offset + ioend->io_size;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);

	if (ip->i_d.di_size < isize) {
		ip->i_d.di_size = isize;
		ip->i_update_core = 1;
		xfs_mark_inode_dirty_sync(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
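/*
 * Example of the clamping above: with i_size at 100k, i_new_size at
 * 132k and a completed write ending at io_offset + io_size == 116k,
 * isize becomes MIN(MAX(100k, 132k), 116k) == 116k, so the on-disk
 * size only ever advances as far as data has actually hit the disk.
 */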
/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			int error;
			error = xfs_iomap_write_unwritten(ip, offset, size);
			if (error)
				ioend->io_error = error;
		}
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}
/*
 * Schedule IO completion handling on a xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct workqueue_struct *wq = xfsdatad_workqueue;
		if (ioend->io_work.func == xfs_end_bio_unwritten)
			wq = xfsconvertd_workqueue;

		queue_work(wq, &ioend->io_work);
		if (wait)
			flush_workqueue(wq);
	}
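/*
 * Unwritten extent conversion is queued to a separate workqueue
 * (xfsconvertd) rather than xfsdatad, so conversion work, which may
 * need to allocate transactions and block, cannot hold up the ordinary
 * data I/O completions queued behind it.
 */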
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later.
 */
	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback that happens before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
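/*
 * The completion handler is fixed at allocation time based on the
 * extent type the ioend is expected to cover: unwritten extents need
 * conversion after the data is on disk, delalloc and plain writes only
 * need the file size update, and reads need neither.  Direct I/O write
 * ioends start out as IOMAP_UNWRITTEN and are switched to the plain
 * written handler at completion time if no unwritten extent was mapped.
 */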
	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);

	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
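/*
 * xfs_iomap_valid() above simply checks that the file offset falls in
 * the half-open range [iomap_offset, iomap_offset + iomap_bsize)
 * covered by the cached mapping, so callers can keep reusing one
 * mapping for consecutive buffers instead of calling back into
 * xfs_iomap() for every block.
 */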
/*
 * BIO completion handler for buffered IO.
 */
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}
	struct buffer_head	*bh)

	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
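/*
 * bi_sector is expressed in 512-byte units, hence the b_size >> 9
 * above: a 4096-byte buffer head at logical block N starts at sector
 * N * 8 on the block device.
 */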
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
xfs_start_page_writeback(
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	clear_page_dirty_for_io(page);
	set_page_writeback(page);

	/* If no buffers on the page are to be written, finish it here */
	end_page_writeback(page);
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we get them, then an I/O completion on a page that only has some
 * buffers marked async write can occur before we mark the other buffers
 * async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
	xfs_ioend_t		*head = ioend;
	struct buffer_head	*bh;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
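/*
 * A bio is flushed and a fresh one started whenever the next buffer is
 * not physically contiguous with the previous one or the current bio is
 * full.  Each submitted bio takes its own reference on io_remaining, so
 * the xfs_finish_ioend() call above only drops the initial reference
 * taken in xfs_alloc_ioend() and cannot complete the ioend while bios
 * are still outstanding.
 */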
/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
	struct buffer_head	*bh,
	xfs_ioend_t		**result,

	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
	struct buffer_head	*bh,

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
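/*
 * iomap_bn is kept in 512-byte basic blocks (BBSHIFT == 9).  With a
 * 4096-byte filesystem block size, block_bits is 12, so the extent
 * start is shifted right by 3 to convert it to filesystem blocks, and
 * the byte offset of 'offset' within the mapping is likewise converted
 * to filesystem blocks before being added.
 */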
	struct buffer_head	*bh,

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
/*
 * Look for a page at index that is suitable for clustering.
 */
	unsigned int		pg_offset,

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,

	pgoff_t			tindex, tlast, tloff;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && trylock_page(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}
		}

		pagevec_release(&pvec);
	}
/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);
	}
775 * Allocate & map buffers for page given the extent map. Write it out.
776 * except for the original page of a writepage, this is called on
777 * delalloc/unwritten pages only, for the original page it is possible
778 * that the page has no mapping at all.
786 xfs_ioend_t
**ioendp
,
787 struct writeback_control
*wbc
,
791 struct buffer_head
*bh
, *head
;
792 xfs_off_t end_offset
;
793 unsigned long p_offset
;
795 int bbits
= inode
->i_blkbits
;
797 int count
= 0, done
= 0, uptodate
= 1;
798 xfs_off_t offset
= page_offset(page
);
800 if (page
->index
!= tindex
)
802 if (!trylock_page(page
))
804 if (PageWriteback(page
))
805 goto fail_unlock_page
;
806 if (page
->mapping
!= inode
->i_mapping
)
807 goto fail_unlock_page
;
808 if (!xfs_is_delayed_page(page
, (*ioendp
)->io_type
))
809 goto fail_unlock_page
;
812 * page_dirty is initially a count of buffers on the page before
813 * EOF and is decremented as we move each into a cleanable state.
817 * End offset is the highest offset that this page should represent.
818 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
819 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
820 * hence give us the correct page_dirty count. On any other page,
821 * it will be zero and in that case we need page_dirty to be the
822 * count of buffers on the page.
824 end_offset
= min_t(unsigned long long,
825 (xfs_off_t
)(page
->index
+ 1) << PAGE_CACHE_SHIFT
,
828 len
= 1 << inode
->i_blkbits
;
829 p_offset
= min_t(unsigned long, end_offset
& (PAGE_CACHE_SIZE
- 1),
831 p_offset
= p_offset
? roundup(p_offset
, len
) : PAGE_CACHE_SIZE
;
832 page_dirty
= p_offset
/ len
;
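/*
 * Worked example of the page_dirty calculation: with 4096-byte pages
 * and 512-byte blocks, a page wholly below EOF gives p_offset == 0,
 * which is turned into PAGE_CACHE_SIZE, so page_dirty == 4096/512 == 8,
 * i.e. every buffer on the page.  On the last page, with EOF at, say,
 * byte 1300 into the page, p_offset is rounded up to 1536 and
 * page_dirty == 3: only the buffers below EOF are counted.
 */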
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				mark_buffer_dirty(bh);
			}
		} else {
			if (buffer_mapped(bh) && all_bh && startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);
	struct backing_dev_info *bdi;

	bdi = inode->i_mapping->backing_dev_info;
	if (bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		done = 1;
	} else if (wbc->nr_to_write <= 0) {
		done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);
911 * Convert & write out a cluster of pages in the same extent as defined
912 * by mp and following the start page.
919 xfs_ioend_t
**ioendp
,
920 struct writeback_control
*wbc
,
928 pagevec_init(&pvec
, 0);
929 while (!done
&& tindex
<= tlast
) {
930 unsigned len
= min_t(pgoff_t
, PAGEVEC_SIZE
, tlast
- tindex
+ 1);
932 if (!pagevec_lookup(&pvec
, inode
->i_mapping
, tindex
, len
))
935 for (i
= 0; i
< pagevec_count(&pvec
); i
++) {
936 done
= xfs_convert_page(inode
, pvec
.pages
[i
], tindex
++,
937 iomapp
, ioendp
, wbc
, startio
, all_bh
);
942 pagevec_release(&pvec
);
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write, the bh
 * states will not agree and only the ones set up by BPW/BCW will have
 * valid state; thus the whole page must be written out.
 */
xfs_page_state_convert(
	struct writeback_control *wbc,
	int			unmapped) /* also implies page uptodate */

	struct buffer_head	*bh, *head;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	unsigned long		p_offset = 0;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			all_bh = unmapped;

	if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
		trylock |= BMAPI_TRYLOCK;
	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);
		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}
			/*
			 * if we didn't have a valid mapping then we
			 * need to ensure that we put the new mapping
			 * in a new ioend structure.  This needs to be
			 * done to ensure that the ioends correctly
			 * reflect the block mappings at io completion
			 * for unwritten extent conversion.
			 */
			if (type == IOMAP_NEW) {
				size = xfs_probe_cluster(inode,
							page, bh, head, 0);
			} else {
				size = len;
			}

			err = xfs_map_blocks(inode, offset, size,
					&iomap, flags);
			if (err)
				goto error;
			iomap_valid = xfs_iomap_valid(&iomap, offset);

			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend, new_ioend);
				} else {
					set_buffer_dirty(bh);
					mark_buffer_dirty(bh);
				}
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it.  Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation.  We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (trylock_buffer(bh)) {
				ASSERT(buffer_mapped(bh));
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}
	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);
	xfs_start_page_writeback(page, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}
	if (iohead)
		xfs_submit_ioend(iohead);

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page.  Typically the page dirty
 *    state is cleared before we get here.  In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it.  For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate.  For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
	struct writeback_control *wbc)
{
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}
	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * VM calculation for nr_to_write seems off.  Bump it way
	 * up, this gets simple streaming writes zippy again.
	 * To be reviewed again after Jens' writeback changes.
	 */
	wbc->nr_to_write *= 4;

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

out_fail:
	redirty_page_for_writepage(wbc, page);
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released.  Possibly the page is already clean.  We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O.  buffer heads will be dirty and possibly
 *    delalloc.  If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
	struct inode		*inode,
	struct buffer_head	*bh_result,
	bmapi_flags_t		flags)

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	error = xfs_iomap(XFS_I(inode), offset, size,
			  create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}
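/*
 * The b_size adjustment above tells the generic code how much of the
 * current mapping extends beyond the requested block, so direct I/O
 * and mpage-based code can map several contiguous blocks with a single
 * get_blocks call rather than one call per block; the length is
 * clamped to LONG_MAX since it is stored via a signed ssize_t cast.
 */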
	struct inode		*inode,
	struct buffer_head	*bh_result,

	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);

xfs_get_blocks_direct(
	struct inode		*inode,
	struct buffer_head	*bh_result,

	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
	xfs_ioend_t		*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple.  Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete.  Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not.  If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
	const struct iovec	*iov,
	unsigned long		nr_segs)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct block_device	*bdev;

	bdev = xfs_find_bdev_for_inode(XFS_I(inode));

	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	struct address_space	*mapping,
	struct page		**pagep,

	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
								xfs_get_blocks);
	struct address_space	*mapping,

	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_itrace_entry(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
	struct file		*unused,

	return mpage_readpage(page, xfs_get_blocks);

	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,

	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
xfs_vm_invalidatepage(
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
};