/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/writeback.h>
STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
		struct writeback_control *wbc, void *, int, int);
#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	bhv_desc_t	*bdp;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
	ip = XFS_BHVTOI(bdp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)NULL,
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif
/*
 * Schedule IO completion handling on a xfsdatad if this was
 * the final hold on this ioend.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t		*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		queue_work(xfsdatad_workqueue, &ioend->io_work);
}

STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	vn_iowake(ioend->io_vnode);
	mempool_free(ioend, xfs_ioend_pool);
}
/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	void			*data)
{
	xfs_ioend_t		*ioend = data;
	vnode_t			*vp = ioend->io_vnode;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	struct buffer_head	*bh, *next;
	int			error;

	if (ioend->io_uptodate)
		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);

	/* ioend->io_buffer_head is only non-NULL for buffered I/O */
	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;

		bh->b_end_io = NULL;
		clear_buffer_unwritten(bh);
		end_buffer_async_write(bh, ioend->io_uptodate);
	}

	xfs_destroy_ioend(ioend);
}
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from running before we have started
	 * all the I/O.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_uptodate = 1; /* cleared if any I/O fails */
	ioend->io_vnode = LINVFS_GET_VP(inode);
	ioend->io_buffer_head = NULL;
	atomic_inc(&ioend->io_vnode->v_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);

	return ioend;
}
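
/*
 * Illustrative sketch, not part of the original source: io_remaining
 * acts as a reference count on the ioend.  For an unwritten extent
 * spanning two buffers the lifetime looks roughly like:
 *
 *	xfs_alloc_ioend()		io_remaining == 1  (submission hold)
 *	atomic_add(2, &io_remaining)	io_remaining == 3  (one per buffer)
 *	2 x linvfs_unwritten_done()	io_remaining == 1
 *	xfs_finish_ioend()		io_remaining == 0  -> queue_work()
 *
 * The hold taken here at allocation time guarantees the work item
 * cannot be queued until the submitter has finished issuing all the
 * I/O and dropped its own hold.
 */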
STATIC void
linvfs_unwritten_done(
	struct buffer_head	*bh,
	int			uptodate)
{
	xfs_ioend_t		*ioend = bh->b_private;
	static spinlock_t	unwritten_done_lock = SPIN_LOCK_UNLOCKED;
	unsigned long		flags;

	ASSERT(buffer_unwritten(bh));
	bh->b_end_io = NULL;

	if (!uptodate)
		ioend->io_uptodate = 0;

	/*
	 * Deep magic here.  We reuse b_private in the buffer_heads to build
	 * a chain for completing the I/O from user context after we've issued
	 * a transaction to convert the unwritten extent.
	 */
	spin_lock_irqsave(&unwritten_done_lock, flags);
	bh->b_private = ioend->io_buffer_head;
	ioend->io_buffer_head = bh;
	spin_unlock_irqrestore(&unwritten_done_lock, flags);

	xfs_finish_ioend(ioend);
}
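
/*
 * Illustrative sketch, not part of the original source: if buffers
 * bh1, bh2 and bh3 complete in that order, the chain built above
 * through b_private is
 *
 *	ioend->io_buffer_head -> bh3 -> bh2 -> bh1 -> NULL
 *
 * i.e. most recently completed first; xfs_end_bio_unwritten() later
 * walks this list in user context and ends each write.
 */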
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}
/*
 * Finds the corresponding mapping in block @map array of the
 * given @offset within a @page.
 */
STATIC xfs_iomap_t *
xfs_offset_to_map(
	struct page		*page,
	xfs_iomap_t		*iomapp,
	unsigned long		offset)
{
	loff_t			full_offset;	/* offset from start of file */

	ASSERT(offset < PAGE_CACHE_SIZE);

	full_offset = page->index;		/* NB: using 64bit number */
	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
	full_offset += offset;			/* offset from page start */

	if (full_offset < iomapp->iomap_offset)
		return NULL;
	if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
		return iomapp;
	return NULL;
}
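
/*
 * Worked example, not part of the original source (assumes 4K pages,
 * PAGE_CACHE_SHIFT == 12): for page->index == 3 and offset == 512,
 * full_offset == (3 << 12) + 512 == 12800.  The mapping is returned
 * iff iomap_offset <= 12800 <= iomap_offset + iomap_bsize - 1.
 */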
STATIC void
xfs_map_at_offset(
	struct page		*page,
	struct buffer_head	*bh,
	unsigned long		offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	xfs_daddr_t		bn;
	loff_t			delta;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	delta = page->index;
	delta <<= PAGE_CACHE_SHIFT;
	delta += offset;
	delta -= iomapp->iomap_offset;
	delta >>= block_bits;

	sector_shift = block_bits - BBSHIFT;
	bn = iomapp->iomap_bn >> sector_shift;
	bn += delta;
	BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
}
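
/*
 * Worked example, not part of the original source: iomap_bn is in
 * 512-byte basic blocks (BBSHIFT == 9).  With 4K filesystem blocks
 * (block_bits == 12), sector_shift == 3, so a mapping starting at
 * basic block 800 gives bn == 800 >> 3 == 100 in filesystem-block
 * units, to which delta (this buffer's block offset within the
 * mapping) is then added.
 */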
/*
 * Look for a page at index which is unlocked and contains our
 * unwritten extent flagged buffers at its head.  Returns page
 * locked and with an extra reference count, and length of the
 * unwritten extent component on this page that we can write,
 * in units of filesystem blocks.
 */
STATIC struct page *
xfs_probe_unwritten_page(
	struct address_space	*mapping,
	pgoff_t			index,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		*ioend,
	unsigned long		max_offset,
	unsigned long		*fsbs,
	unsigned int		bbits)
{
	struct page		*page;

	page = find_trylock_page(mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		unsigned long		p_offset = 0;

		*fsbs = 0;
		bh = head = page_buffers(page);
		do {
			if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
				break;
			if (!xfs_offset_to_map(page, iomapp, p_offset))
				break;
			if (p_offset >= max_offset)
				break;
			xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
			set_buffer_unwritten_io(bh);
			bh->b_private = ioend;
			p_offset += bh->b_size;
			(*fsbs)++;
		} while ((bh = bh->b_this_page) != head);

		if (p_offset)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}
/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
STATIC unsigned int
xfs_probe_unmapped_page(
	struct address_space	*mapping,
	pgoff_t			index,
	unsigned int		pg_offset)
{
	struct page		*page;
	int			ret = 0;

	page = find_trylock_page(mapping, index);
	if (!page)
		return 0;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (buffer_mapped(bh) || !buffer_uptodate(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = PAGE_CACHE_SIZE;
	}

out:
	unlock_page(page);
	return ret;
}
STATIC unsigned int
xfs_probe_unmapped_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head)
{
	pgoff_t			tindex, tlast, tloff;
	unsigned int		pg_offset, len, total = 0;
	struct address_space	*mapping = inode->i_mapping;

	/* First sum forwards in this page */
	do {
		if (buffer_mapped(bh))
			break;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* If we reached the end of the page, sum forwards in
	 * following pages.
	 */
	if (total == PAGE_CACHE_SIZE) {
		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		/* Prune this back to avoid pathological behavior */
		tloff = min(tlast, startpage->index + 64);
		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
			len = xfs_probe_unmapped_page(mapping, tindex,
							PAGE_CACHE_SIZE);
			if (!len)
				return total;
			total += len;
		}
		if (tindex == tlast &&
		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			total += xfs_probe_unmapped_page(mapping,
							tindex, pg_offset);
		}
	}
	return total;
}
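
/*
 * Illustrative note, not part of the original source: the prune above
 * caps the forward probe at 64 pages beyond the start page, so with 4K
 * pages a single cluster mapping request covers at most roughly 256K
 * plus the EOF tail; the final page is only probed up to
 * i_size & (PAGE_CACHE_SIZE - 1) bytes so the mapping never extends
 * past EOF.
 */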
/*
 * Probe for a given page (index) in the inode and test if it is delayed
 * and without unwritten buffers.  Returns page locked and with an extra
 * reference count.
 */
STATIC struct page *
xfs_probe_delalloc_page(
	struct inode		*inode,
	pgoff_t			index)
{
	struct page		*page;

	page = find_trylock_page(inode->i_mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh)) {
				acceptable = 0;
				break;
			} else if (buffer_delay(bh)) {
				acceptable = 1;
			}
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}
STATIC int
xfs_map_unwritten(
	struct inode		*inode,
	struct page		*start_page,
	struct buffer_head	*head,
	struct buffer_head	*curr,
	unsigned long		p_offset,
	int			block_bits,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh = curr;
	xfs_iomap_t		*tmp;
	xfs_ioend_t		*ioend;
	loff_t			offset;
	unsigned long		nblocks = 0;

	offset = start_page->index;
	offset <<= PAGE_CACHE_SHIFT;
	offset += p_offset;

	ioend = xfs_alloc_ioend(inode);

	/* First map forwards in the page consecutive buffers
	 * covering this unwritten extent
	 */
	do {
		if (!buffer_unwritten(bh))
			break;
		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
		if (!tmp)
			break;
		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
		set_buffer_unwritten_io(bh);
		bh->b_private = ioend;
		p_offset += bh->b_size;
		nblocks++;
	} while ((bh = bh->b_this_page) != head);

	atomic_add(nblocks, &ioend->io_remaining);

	/* If we reached the end of the page, map forwards in any
	 * following pages which are also covered by this extent.
	 */
	if (bh == head) {
		struct address_space	*mapping = inode->i_mapping;
		pgoff_t			tindex, tloff, tlast;
		unsigned long		bs;
		unsigned int		pg_offset, bbits = inode->i_blkbits;
		struct page		*page;

		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
		tloff = min(tlast, tloff);
		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, ioend,
						PAGE_CACHE_SIZE, &bs, bbits);
			if (!page)
				break;
			nblocks += bs;
			atomic_add(bs, &ioend->io_remaining);
			xfs_convert_page(inode, page, iomapp, wbc, ioend,
							startio, all_bh);
			/* stop if converting the next page might add
			 * enough blocks that the corresponding byte
			 * count won't fit in our ulong page buf length */
			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
				goto enough;
		}

		if (tindex == tlast &&
		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
			page = xfs_probe_unwritten_page(mapping,
							tindex, iomapp, ioend,
							pg_offset, &bs, bbits);
			if (page) {
				nblocks += bs;
				atomic_add(bs, &ioend->io_remaining);
				xfs_convert_page(inode, page, iomapp, wbc, ioend,
							startio, all_bh);
				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
					goto enough;
			}
		}
	}

enough:
	ioend->io_size = (xfs_off_t)nblocks << block_bits;
	ioend->io_offset = offset;
	xfs_finish_ioend(ioend);
	return 0;
}
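
/*
 * Illustrative note on the overflow guard in the loops above, not part
 * of the original source: nblocks is an unsigned long, so on a 32-bit
 * machine with 4K blocks (block_bits == 12) the cluster must stop
 * growing before nblocks reaches (0xffffffff - 4096) >> 12, i.e.
 * roughly one million blocks (~4GB), or the corresponding byte count
 * would wrap.
 */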
STATIC void
xfs_submit_page(
	struct page		*page,
	struct writeback_control *wbc,
	struct buffer_head	*bh_arr[],
	int			bh_count,
	int			probed_page,
	int			clear_dirty)
{
	struct buffer_head	*bh;
	int			i;

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	if (clear_dirty)
		clear_page_dirty(page);
	unlock_page(page);

	if (bh_count) {
		for (i = 0; i < bh_count; i++) {
			bh = bh_arr[i];
			mark_buffer_async_write(bh);
			if (buffer_unwritten(bh))
				set_buffer_unwritten_io(bh);
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
		}

		for (i = 0; i < bh_count; i++)
			submit_bh(WRITE, bh_arr[i]);

		if (probed_page && clear_dirty)
			wbc->nr_to_write--;	/* Wrote an "extra" page */
	}
}
/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC void
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	void			*private,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*mp = iomapp, *tmp;
	unsigned long		offset, end_offset;
	int			index = 0;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;

	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 */
	len = 1 << inode->i_blkbits;
	end_offset = max(end_offset, PAGE_CACHE_SIZE);
	end_offset = roundup(end_offset, len);
	page_dirty = end_offset / len;

	offset = 0;
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!(PageUptodate(page) || buffer_uptodate(bh)))
			continue;
		if (buffer_mapped(bh) && all_bh &&
		    !(buffer_unwritten(bh) || buffer_delay(bh))) {
			if (startio) {
				lock_buffer(bh);
				bh_arr[index++] = bh;
				page_dirty--;
			}
			continue;
		}
		tmp = xfs_offset_to_map(page, mp, offset);
		if (!tmp)
			continue;
		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

		/* If this is a new unwritten extent buffer (i.e. one
		 * that we haven't passed in private data for), we must
		 * now map this buffer too.
		 */
		if (buffer_unwritten(bh) && !bh->b_end_io) {
			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
			xfs_map_unwritten(inode, page, head, bh, offset,
					bbits, tmp, wbc, startio, all_bh);
		} else if (!(buffer_unwritten(bh) && buffer_locked(bh))) {
			xfs_map_at_offset(page, bh, offset, bbits, tmp);
			if (buffer_unwritten(bh)) {
				set_buffer_unwritten_io(bh);
				bh->b_private = private;
				ASSERT(private);
			}
		}
		if (startio) {
			bh_arr[index++] = bh;
		} else {
			set_buffer_dirty(bh);
			unlock_buffer(bh);
			mark_buffer_dirty(bh);
		}
		page_dirty--;
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (startio && index) {
		xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
	} else {
		unlock_page(page);
	}
}
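
/*
 * Worked example for the page_dirty accounting above, not part of the
 * original source (assumes 4K pages and 512-byte blocks): end_offset
 * becomes max(i_size & 4095, 4096) == 4096, so page_dirty starts at
 * 4096 / 512 == 8, one count per buffer.  Each buffer moved into a
 * cleanable state decrements it, and !page_dirty at submission time
 * therefore means every buffer on the page was handled.
 */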
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct page		*page;

	for (; tindex <= tlast; tindex++) {
		page = xfs_probe_delalloc_page(inode, tindex);
		if (!page)
			break;
		xfs_convert_page(inode, page, iomapp, wbc, NULL,
				startio, all_bh);
	}
}
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write, the
 * bh->b_states will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*iomp, iomap;
	loff_t			offset;
	unsigned long		p_offset = 0;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	int			len, err, i, cnt = 0, uptodate = 1;
	int			flags;
	int			page_dirty;

	/* wait for other IO threads? */
	flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			err = -EIO;
			goto error;
		}
	}

	end_offset = min_t(unsigned long long,
			(loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 */
	len = 1 << inode->i_blkbits;
	p_offset = max(p_offset, PAGE_CACHE_SIZE);
	p_offset = roundup(p_offset, len);
	page_dirty = p_offset / len;

	iomp = NULL;
	p_offset = 0;
	bh = head = page_buffers(page);

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
			continue;

		if (iomp) {
			iomp = xfs_offset_to_map(page, &iomap, p_offset);
		}

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 */
		if (buffer_unwritten(bh)) {
			if (!startio)
				continue;
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_WRITE|BMAPI_IGNSTATE);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				if (!bh->b_end_io) {
					err = xfs_map_unwritten(inode, page,
							head, bh, p_offset,
							inode->i_blkbits, iomp,
							wbc, startio, unmapped);
					if (err) {
						goto error;
					}
				} else {
					set_bit(BH_Lock, &bh->b_state);
				}
				BUG_ON(!buffer_locked(bh));
				bh_arr[cnt++] = bh;
				page_dirty--;
			}
		/*
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 */
		} else if (buffer_delay(bh)) {
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_ALLOCATE | flags);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				xfs_map_at_offset(page, bh, p_offset,
						inode->i_blkbits, iomp);
				if (startio) {
					bh_arr[cnt++] = bh;
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {

			if (!buffer_mapped(bh)) {
				int	size;

				/*
				 * Getting here implies an unmapped buffer
				 * was found, and we are in a path where we
				 * need to write the whole page out.
				 */
				if (!iomp) {
					size = xfs_probe_unmapped_cluster(
							inode, page, bh, head);
					err = xfs_map_blocks(inode, offset,
							size, &iomap,
							BMAPI_WRITE|BMAPI_MMAP);
					if (err) {
						goto error;
					}
					iomp = xfs_offset_to_map(page, &iomap,
								     p_offset);
				}
				if (iomp) {
					xfs_map_at_offset(page,
							bh, p_offset,
							inode->i_blkbits, iomp);
					if (startio) {
						bh_arr[cnt++] = bh;
					} else {
						set_buffer_dirty(bh);
						unlock_buffer(bh);
						mark_buffer_dirty(bh);
					}
					page_dirty--;
				}
			} else if (startio) {
				if (buffer_uptodate(bh) &&
				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
					bh_arr[cnt++] = bh;
					page_dirty--;
				}
			}
		}
	} while (offset += len, p_offset += len,
		((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
	}

	if (iomp) {
		offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, iomp, wbc,
					startio, unmapped, tlast);
	}

	return page_dirty;

error:
	for (i = 0; i < cnt; i++) {
		unlock_buffer(bh_arr[i]);
	}

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped) {
			block_invalidatepage(page, 0);
		}
		ClearPageUptodate(page);
	}
	return err;
}
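
/*
 * Worked example for the beyond-EOF check at the top of this function,
 * not part of the original source (assumes 4K pages): for
 * i_size == 0x5200, end_index == 5 and last_index == 5.  Pages with
 * index > 5 are wholly beyond EOF and fail with -EIO, as does page 5
 * itself when i_size is page aligned; a partial tail page (as here)
 * is still written.
 */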
STATIC int
__linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		blocks,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t		iomap;
	int			retpbbm = 1;
	int			error;
	ssize_t			size;
	loff_t			offset = (loff_t)iblock << inode->i_blkbits;

	if (blocks)
		size = blocks << inode->i_blkbits;
	else
		size = 1 << inode->i_blkbits;

	VOP_BMAP(vp, offset, size,
		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
	if (error)
		return -error;

	if (retpbbm == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		xfs_daddr_t		bn;
		loff_t			delta;

		/* For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			delta = offset - iomap.iomap_offset;
			delta >>= inode->i_blkbits;

			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
			bn += delta;
			BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
			bh_result->b_blocknr = bn;
			set_buffer_mapped(bh_result);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/* If this is a realtime file, data might be on a new device */
	bh_result->b_bdev = iomap.iomap_target->pbr_bdev;

	/* If we previously allocated a block out beyond eof and
	 * we are now coming back to use it then we will need to
	 * flag it as new even if it has a disk address.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
		set_buffer_new(bh_result);
	}

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (blocks) {
		bh_result->b_size = (ssize_t)min(
			(loff_t)(iomap.iomap_bsize - iomap.iomap_delta),
			(loff_t)(blocks << inode->i_blkbits));
	}

	return 0;
}
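
/*
 * Worked example for the b_size clamp above, not part of the original
 * source (assumes 4K blocks): if the caller asked for blocks == 32
 * (128K) but the returned mapping only covers
 * iomap_bsize - iomap_delta == 64K beyond this offset, b_size is
 * clamped to 64K and the direct I/O code comes back with another
 * mapping call for the remainder.
 */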
int
linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, 0, bh_result,
					create, 0, BMAPI_WRITE);
}
STATIC int
linvfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		max_blocks,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
					create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
STATIC void
linvfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	if (private && size > 0) {
		ioend->io_offset = offset;
		ioend->io_size = size;
		xfs_finish_ioend(ioend);
	} else {
		xfs_destroy_ioend(ioend);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}
STATIC ssize_t
linvfs_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;
	ssize_t		ret;

	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
	if (error)
		return -error;

	iocb->private = xfs_alloc_ioend(inode);

	ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
		iomap.iomap_target->pbr_bdev,
		iov, offset, nr_segs,
		linvfs_get_blocks_direct,
		linvfs_end_io_direct);

	if (unlikely(ret <= 0 && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}
STATIC sector_t
linvfs_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error;

	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

	VOP_RWLOCK(vp, VRWLOCK_READ);
	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
	VOP_RWUNLOCK(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, linvfs_get_block);
}
STATIC int
linvfs_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, linvfs_get_block);
}
STATIC int
linvfs_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;
	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh) && !buffer_delay(bh))
			clear_buffer_unwritten(bh);
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
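
/*
 * Illustrative summary of the classification above, not part of the
 * original source (evaluated in order, first match wins):
 *
 *	uptodate && !mapped	-> counts as unmapped
 *	unwritten && !delay	-> stale flag, cleared on the spot
 *	unwritten && delay	-> counts as unwritten
 *	!unwritten && delay	-> counts as delalloc
 */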
/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
linvfs_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (PFLAGS_TEST_FSTRANS() && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}
STATIC int
linvfs_invalidate_page(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	return block_invalidatepage(page, offset);
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If there are no delalloc buffer heads in this case
 *    then we can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap; all we need to do is ensure there is no delalloc
 *    state in the buffer heads. If there is none we can let the
 *    caller free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
	struct page		*page,
	int			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (PFLAGS_TEST_FSTRANS())
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}
STATIC int
linvfs_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	return block_prepare_write(page, from, to, linvfs_get_block);
}
struct address_space_operations linvfs_aops = {
	.readpage		= linvfs_readpage,
	.readpages		= linvfs_readpages,
	.writepage		= linvfs_writepage,
	.sync_page		= block_sync_page,
	.releasepage		= linvfs_release_page,
	.invalidatepage		= linvfs_invalidate_page,
	.prepare_write		= linvfs_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= linvfs_bmap,
	.direct_IO		= linvfs_direct_IO,
};