/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
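/*
 * Worked example for btrfs_copy_from_user() above: with 4K pages, a write
 * starting at pos = 5000 gives offset = 5000 & 4095 = 904, so the first
 * iteration copies at most 4096 - 904 = 3192 bytes into prepared_pages[0];
 * subsequent iterations start at offset 0 and copy up to a full page each.
 */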
/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		     root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
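/*
 * Worked example for the sector rounding above: with a 4K sectorsize, a
 * 2000-byte write at pos = 3000 gives start_pos = 0 and num_bytes =
 * (2000 + 3000 + 4095) & ~4095 = 8192, i.e. the delalloc range covers both
 * whole sectors the write touches, not just the bytes actually written.
 */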
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);
		BUG_ON(!split || !split2);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
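/*
 * Note on the two split maps above: when the dropped range lands strictly
 * inside one cached extent, pieces survive on both sides of it, so up to two
 * replacement mappings (split and split2) may be needed in one iteration.
 * Both are allocated up front, presumably so no allocation has to happen
 * while the extent map tree lock is held.
 */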
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, -1);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow) {
			btrfs_release_path(root, path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(root, path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret);
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			BUG_ON(ret);

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(root, path);
			continue;
		}

		BUG_ON(1);
	}

	if (del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}

	btrfs_free_path(path);
	return ret;
}
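/*
 * Note: the four diagrammed cases above (range inside the extent, range
 * overlapping the extent's front, range overlapping its tail, extent fully
 * inside the range) are exhaustive for any extent that intersects
 * [start, end), which is why the loop body ends in BUG_ON(1) if none of
 * them matched.
 */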
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
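/*
 * Note: *start and *end above are in/out parameters.  Non-zero values on
 * entry act as required bounds (the candidate item must start or end exactly
 * there); on success the candidate's [start, end) range is written back so
 * the caller can extend a neighbouring extent over it.
 */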
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(root, path);
			goto again;
		}
		BUG_ON(ret < 0);

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset);
		BUG_ON(ret);

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset);
		BUG_ON(ret);
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset);
		BUG_ON(ret);
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}
out:
	btrfs_free_path(path);
	return 0;
}
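/*
 * Rough sketch of the three-way split performed above when only the middle
 * of a preallocated extent is written:
 *
 *   before:  [___________ prealloc ___________]
 *   after:   [ prealloc ][ written ][ prealloc ]
 *            key.offset  start      end         extent_end
 *
 * All the resulting items still reference the same disk extent, hence the
 * extra backref taken with btrfs_inc_extent_ref() for each split.
 */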
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos)
{
	int ret = 0;

	if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err)
			return err;
	}

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state,
				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				 GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}
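/*
 * Note: after waiting for a conflicting ordered extent the code above jumps
 * back to "again" and re-grabs every page rather than reusing the old ones,
 * presumably because ordered IO completion may have written the pages back
 * or removed them from the mapping while they were unlocked.
 */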
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	unsigned long last_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0)
			dirty_pages = 0;
		else
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;

		/*
		 * If we had a short copy we need to release the excess delalloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0)
				atomic_inc(
					&BTRFS_I(inode)->outstanding_extents);
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);
		btrfs_throttle(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}
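/*
 * Worked example for the nrptrs computation above: on a 64-bit machine with
 * 4K pages, PAGE_CACHE_SIZE / sizeof(struct page *) = 4096 / 8 = 512, so a
 * large write is processed in chunks of at most 512 pages (2MB) per loop
 * iteration, and the pages pointer array itself never exceeds one page.
 */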
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	/*
	 * the generic O_DIRECT will update in-memory i_size after the
	 * DIOs are done.  But our endio handlers that update the on
	 * disk i_size never update past the in memory i_size.  So we
	 * need one more update here to catch any additions to the
	 * file
	 */
	if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
		mark_inode_dirty(inode);
	}

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}
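/*
 * Note: when the O_DIRECT write above comes up short, the remainder is
 * pushed through __btrfs_buffered_write(), then written back and invalidated
 * from the page cache, so the combined result still behaves like a single
 * direct write from the caller's point of view.
 */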
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	file_update_time(file);
	BTRFS_I(inode)->sequence++;

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all the ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	/* the VFS called filemap_fdatawrite for us */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
	if (ret)
		goto out;

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);

	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
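/*
 * Worked example for the alignment above: with a 4K sectorsize (mask =
 * 4095), fallocate(offset = 1000, len = 3000) gives alloc_start = 0 and
 * alloc_end = (1000 + 3000 + 4095) & ~4095 = 4096, so preallocation always
 * covers whole sectors around the requested byte range.
 */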
const struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};