/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/aio.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}
/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_iocb) {
		inode_dio_done(ioend->io_inode);
		if (ioend->io_isasync) {
			aio_complete(ioend->io_iocb, ioend->io_error ?
					ioend->io_error : ioend->io_result, 0);
		}
	}

	mempool_free(ioend, xfs_ioend_pool);
}
/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
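/*
 * For example, an ioend covering bytes [4096, 8192) of an inode whose
 * on-disk size (di_size) is 6000 is an append: io_offset + io_size == 8192
 * exceeds 6000, so completion must push di_size out to 8192. Against a
 * di_size of 8192 or more the same ioend needs no size update.
 */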
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}
/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;
	xfs_fsize_t		isize;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
			   0, 1, _THIS_IP_);

	/* we abort the update if there was an IO error */
	if (ioend->io_error) {
		xfs_trans_cancel(tp, 0);
		return ioend->io_error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		return 0;
	}

	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}
/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right away.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans ||
			 (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}
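/*
 * Sketch of the io_remaining lifecycle this relies on: xfs_alloc_ioend()
 * starts the count at 1, xfs_submit_ioend_bio() takes another reference for
 * every bio issued against the ioend, and xfs_end_bio() drops one as each
 * bio completes. The submitter drops the initial reference once all bios
 * are issued, so whichever side performs the final put (submitter or last
 * completing bio) runs completion exactly once, however the two race.
 */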
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 * Detecting and handling completion IO errors is done individually
	 * for each case as different cleanup operations need to be performed
	 * on error.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
		/*
		 * For direct I/O we do not know if we need to allocate blocks
		 * or not so we can't preallocate an append transaction as that
		 * results in nested reservations and log space deadlocks. Hence
		 * allocate the transaction here. While this is sub-optimal and
		 * can block IO completion for some time, we're stuck with doing
		 * it this way until we can pass the ioend to the direct IO
		 * allocation callbacks and avoid nesting that way.
		 */
		error = xfs_setfilesize_trans_alloc(ioend);
		if (error)
			goto done;
		error = xfs_setfilesize(ioend);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = -error;
	xfs_destroy_ioend(ioend);
}
/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_isdirect = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}
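/*
 * Note there is no NULL check after mempool_alloc(): with a sleeping
 * allocation mask like GFP_NOFS, mempool_alloc() waits for an element to be
 * returned to the pool rather than failing, so the allocation cannot return
 * NULL here.
 */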
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
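/*
 * Worked example: with 4k blocks (i_blkbits == 12), byte offset 0x6000 is
 * file block 6. A mapping with br_startoff == 4 and br_blockcount == 4
 * covers blocks [4, 8), so the offset is inside it; block 8 or beyond would
 * force the caller to look up a new mapping.
 */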
/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
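/*
 * bio_add_page() returns the number of bytes actually added, which can be
 * less than bh->b_size once the bio is full or the request queue limits are
 * hit. The submission loop below treats any short add as "bio full": it
 * submits the current bio and retries the buffer against a fresh one.
 */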
/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, then we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those can occur before we mark
 * the other buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the ioend chain rather
 * than submit it to IO. This typically only happens on a filesystem shutdown.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	int			fail)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it. This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		if (fail) {
			ioend->io_error = -fail;
			xfs_finish_ioend(ioend);
			continue;
		}

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}
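/*
 * A bio is therefore closed off and a new one opened in exactly two cases:
 * the next buffer is not physically contiguous with the previous one
 * (bh->b_blocknr != lastblock + 1), or bio_add_buffer() could not take the
 * whole buffer. For example, buffers at blocks 10, 11 and 20 are submitted
 * as two bios: {10, 11} and {20}.
 */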
/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
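/*
 * Worked example of the b_blocknr arithmetic: with 4k blocks
 * (i_blkbits == 12) and 512-byte basic blocks (BBSHIFT == 9), iomap_bn is
 * shifted right by 3 to convert daddr units to filesystem blocks, and the
 * buffer's byte distance from the start of the mapping is added in block
 * units. A mapping starting at daddr 800, queried 8k (two blocks) in,
 * yields bn = (800 >> 3) + 2 = 102.
 */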
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_check_page_type(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable += (type == XFS_IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable += (type == XFS_IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable += (type == XFS_IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}
/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only, for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	/*
	 * If the current map does not span the entire page we are about to try
	 * to write, then give up. The only way we can write a page that spans
	 * multiple mappings in a single writeback iteration is via the
	 * xfs_vm_writepage() function. Data integrity writeback requires the
	 * entire page to be written in a single attempt, otherwise the part of
	 * the page we don't write here doesn't get written as part of the data
	 * integrity sync.
	 *
	 * For normal writeback, we also don't attempt to write partial pages
	 * here as it simply means that write_cache_pages() will see it under
	 * writeback and ignore the page until some point in the future, at
	 * which time this will be the only page in the file that needs
	 * writeback.  Hence for more optimal IO patterns, we should always
	 * avoid partial page writeback due to multiple mappings on a page here.
	 */
	if (!xfs_imap_valid(inode, imap, end_offset))
		goto fail_unlock_page;

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
			else
				type = XFS_IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}
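/*
 * Worked example of the page_dirty calculation above: with 4k pages and 1k
 * blocks (len == 1024), a page wholly inside i_size has
 * end_offset & (PAGE_CACHE_SIZE - 1) == 0, so p_offset becomes
 * PAGE_CACHE_SIZE and page_dirty == 4, i.e. all four buffers. If i_size
 * ends 1500 bytes into the last page, roundup(1500, 1024) == 2048 and
 * page_dirty == 2: only the two buffers overlapping valid data count.
 */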
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}
/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see a ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 */
		if (page->index >= end_index + 1 || offset_into_page == 0)
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = XFS_IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != XFS_IO_UNWRITTEN) {
				type = XFS_IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != XFS_IO_DELALLOC) {
				type = XFS_IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != XFS_IO_OVERWRITE) {
				type = XFS_IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	/* if there is no IO to be submitted for this page, we are done */
	if (!ioend)
		return 0;

	ASSERT(iohead);

	/*
	 * Any errors from this point onwards need to be reported through the
	 * IO completion path as we have marked the initial page as under
	 * writeback and unlocked it.
	 */
	if (imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	/*
	 * Reserve log space if we might write beyond the on-disk inode size.
	 */
	err = 0;
	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
		err = xfs_setfilesize_trans_alloc(ioend);

	xfs_submit_ioend(wbc, iohead, err);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.  For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * beginning is a useful micro optimization.
	 */
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_map_shared(ip);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			/*
			 * Drop the ilock in preparation for starting the block
			 * allocation transaction.  It will be retaken
			 * exclusively inside xfs_iomap_write_direct for the
			 * actual allocation.
			 */
			xfs_iunlock(ip, lockmode);
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
			if (error)
				return -error;
			new = 1;
		} else {
			/*
			 * Delalloc reservations do not require a transaction,
			 * we can go on without dropping the lock here. If we
			 * are allocating a new delalloc block, make sure that
			 * we set the new flag so that we mark the buffer new so
			 * that we know that it is newly allocated if the write
			 * fails.
			 */
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
				new = 1;
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			if (error)
				goto out_unlock;

			xfs_iunlock(ip, lockmode);
		}

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling us, tell them how
	 * large the mapping is, so that we can avoid repeated get_blocks
	 * calls.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}
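/*
 * Example of the mapping_size clamp above: a request for block 10 against a
 * mapping covering file blocks [8, 16) leaves 8 + 8 - 10 = 6 blocks, i.e.
 * 24k with 4k blocks. If the caller's bh_result->b_size is only 16k the
 * reported size is clamped to that, and it is further capped at LONG_MAX
 * because callers treat b_size as a long byte count.
 */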
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}
/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions.  In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size.  To prevent this just update it a little bit earlier here.
	 */
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	ioend->io_iocb = iocb;
	ioend->io_result = ret;
	if (private && size > 0)
		ioend->io_type = XFS_IO_UNWRITTEN;

	if (is_async) {
		ioend->io_isasync = 1;
		xfs_finish_ioend(ioend);
	} else {
		xfs_finish_ioend_sync(ioend);
	}
}
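/*
 * Flow summary: for synchronous direct I/O this handler runs in process
 * context, so xfs_finish_ioend_sync() may do the potentially
 * transaction-starting completion work directly. For AIO it runs from bio
 * completion, i.e. interrupt context, so the work is deferred to a
 * workqueue via xfs_finish_ioend() and io_isasync tells xfs_destroy_ioend()
 * to issue the deferred aio_complete().
 */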
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	struct xfs_ioend	*ioend = NULL;
	ssize_t			ret;

	if (rw & WRITE) {
		size_t size = iov_length(iov, nr_segs);

		/*
		 * We cannot preallocate a size update transaction here as we
		 * don't know whether allocation is necessary or not. Hence we
		 * can only tell IO completion that one is necessary if we are
		 * not doing unwritten extent conversion.
		 */
		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
		if (offset + size > XFS_I(inode)->i_d.di_size)
			ioend->io_isdirect = 1;

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			goto out_destroy_ioend;
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   NULL, NULL, 0);
	}

	return ret;

out_destroy_ioend:
	xfs_destroy_ioend(ioend);
	return ret;
}
/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk
 * yet as the page is still locked at this point.
 */
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
	if (error) {
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset = pos & PAGE_MASK;
	loff_t			block_start;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	ASSERT(block_offset + from == pos);

	head = page_buffers(page);
	block_start = 0;
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
	     block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)
			continue;

		/* if the buffer is after the write, we're done */
		if (block_start >= to)
			break;

		if (!buffer_delay(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);
	}

}
/*
 * This used to call block_write_begin(), but it unlocks and releases the page
 * on error, and we need that page to be able to punch stale delalloc blocks
 * out on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed()
 * at the appropriate point.
 */
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index,
					   flags | AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

		if (pos + len > i_size_read(inode))
			truncate_pagecache(inode, pos + len, i_size_read(inode));

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}
/*
 * On failure, we only need to kill delalloc blocks beyond EOF because they
 * will never be written. For blocks within EOF, generic_write_end() zeros them
 * so they are safe to leave alone and be written with all the other valid
 * data.
 */
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			truncate_pagecache(inode, to, isize);
			xfs_vm_kill_delalloc_range(inode, isize, to);
		}
	}
	return ret;
}
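/*
 * Example: a write of len == 4096 at pos == i_size that copies only 1024
 * bytes (ret < len) leaves [pos + 1024, pos + 4096) as blocks that were
 * reserved but will never be written. truncate_pagecache() trims the page
 * cache back to the old size and xfs_vm_kill_delalloc_range() releases the
 * stale delalloc reservation beyond it.
 */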
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages
 * them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += 1 << inode->i_blkbits;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					   page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
	return newly_dirty;
}
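/*
 * Example of the EOF check above: with 1k blocks and i_size ending 1500
 * bytes into the page, only the first two buffers satisfy
 * offset < end_offset and are marked dirty; the buffers wholly beyond EOF
 * stay clean, so the dirty-but-unwritable state described in the comment
 * above can never be created.
 */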
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};