/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);
	int total_copied = 0;

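	/*
	 * Editor's note, a worked example of the offset math above
	 * (assuming PAGE_CACHE_SIZE == 4096): for pos == 5000,
	 * offset == 5000 & 4095 == 904, so the first iteration copies at
	 * most 4096 - 904 == 3192 bytes into prepared_pages[0]; every
	 * following page starts at offset 0.
	 */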
	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty.  Clear it here.
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct file *file,
					    struct page **pages,
					    size_t num_pages,
					    loff_t pos,
					    size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

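	/*
	 * Editor's note on the alignment math above: start_pos rounds pos
	 * down to a sector boundary and num_bytes rounds the byte count up
	 * to one.  Assuming a 4096-byte sectorsize, pos == 5000 and
	 * write_bytes == 100 give start_pos == 4096 and num_bytes == 4096.
	 */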
	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					NULL);
	BUG_ON(err);

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		/* we've only changed i_size in ram, and we haven't updated
		 * the disk i_size.  There is no need to log the inode
		 * at this time.
		 */
	}
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);
		BUG_ON(!split || !split2);
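		/*
		 * Editor's note: two spare extent_maps are allocated up
		 * front because a cached extent overlapping both ends of
		 * [start, end] must be split into a piece before the range
		 * and a piece after it, and no allocation can safely
		 * happen once em_tree->lock is held below.
		 */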

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

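	/*
	 * Editor's note: each pass of the loop below classifies the extent
	 * item at path->slots[0] against [start, end) and handles one of
	 * four overlap cases, sketched in the ASCII diagrams inline: the
	 * range covers the middle of the extent, its front, its back, or
	 * all of it (in which case the item is queued for deletion).
	 */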
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == inode->i_ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > inode->i_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow) {
			btrfs_release_path(root, path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(root, path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret);
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			BUG_ON(ret);

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(root, path);
			continue;
		}

		BUG_ON(1);
	}

	if (del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}

	btrfs_free_path(path);
	return ret;
}

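/*
 * Editor's note: extent_mergeable (below) reports whether the file extent
 * item in 'slot' is a plain (uncompressed, unencrypted) REG extent backed
 * by 'bytenr' with the expected original offset, so a neighbouring piece
 * of the same physical extent can be merged with it.  On success it fills
 * *start/*end with the item's range and returns 1.
 */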
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	recow = 0;
	split = start;
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

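	/*
	 * Editor's note: the loop below runs at most twice.  It duplicates
	 * the prealloc item at 'split' so that the sub-range being marked
	 * written ends up in its own item; a write into the middle of the
	 * extent needs two splits (one at 'start', one at 'end'), a write
	 * touching either edge needs only one.
	 */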
	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(root, path);
			goto again;
		}
		BUG_ON(ret < 0);

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   inode->i_ino, orig_offset);
		BUG_ON(ret);

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos)
{
	int ret = 0;

	if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
				  struct page **pages, size_t num_pages,
				  loff_t pos, unsigned long first_index,
				  unsigned long last_index, size_t write_bytes)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

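		/*
		 * Editor's note: only the first and last pages of the
		 * batch can be partially overwritten, so only they may
		 * need to be read in and brought up to date first; the
		 * interior pages are about to be completely rewritten.
		 */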
		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
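	/*
	 * Editor's note: if the write lands below i_size, any ordered
	 * extent still in flight over this range has to be waited out
	 * before the pages can be redirtied; the ordered code is still
	 * using those pages to finish checksumming and writeback for the
	 * previous write.
	 */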
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state,
				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				 GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}

static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	struct iov_iter i;
	loff_t *ppos = &iocb->ki_pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count;
	size_t ocount;
	int ret = 0;
	int nrptrs;
	unsigned long first_index;
	unsigned long last_index;
	int will_write;
	int buffered = 0;
	int copied = 0;
	int dirty_pages = 0;

	will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		goto out;
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		err = -EROFS;
		goto out;
	}

	file_update_time(file);
	BTRFS_I(inode)->sequence++;

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = generic_file_direct_write(iocb, iov, &nr_segs,
							pos, ppos, count,
							ocount);
		/*
		 * the generic O_DIRECT will update in-memory i_size after the
		 * DIOs are done.  But our endio handlers that update the on
		 * disk i_size never update past the in memory i_size.  So we
		 * need one more update here to catch any additions to the
		 * file
		 */
		if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
			btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
			mark_inode_dirty(inode);
		}

		if (num_written < 0) {
			ret = num_written;
			num_written = 0;
			goto out;
		} else if (num_written == count) {
			/* pick up pos changes done by the generic code */
			pos = *ppos;
			goto out;
		}
		/*
		 * We are going to do buffered for the rest of the range, so we
		 * need to make sure to invalidate the buffered pages when we're
		 * done.
		 */
		buffered = 1;
		pos += num_written;
	}

	iov_iter_init(&i, iov, nr_segs, count, num_written);
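	/*
	 * Editor's note: nrptrs below is sized so the page-pointer array
	 * is big enough for the whole remaining write but never larger
	 * than one page's worth of pointers.  Assuming 4096-byte pages
	 * and 8-byte pointers, that caps a single batch at 512 pages (2MB).
	 */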
	nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	/* generic_write_checks can change our pos */
	start_pos = pos;

	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(&i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(&i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) {
			ret = -EFAULT;
			goto out;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			goto out;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, &i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

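		/*
		 * Editor's note: dirty_pages (computed below) is how many
		 * pages the possibly-short copy actually touched.  Space
		 * was reserved for num_pages up front, so the reservation
		 * covering the untouched tail is handed back; the
		 * outstanding_extents increment appears to keep the
		 * delalloc accounting consistent when only part of the
		 * reservation is released.
		 */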
		if (copied == 0)
			dirty_pages = 0;
		else
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;

		if (num_pages > dirty_pages) {
			if (copied > 0)
				atomic_inc(
					&BTRFS_I(inode)->outstanding_extents);
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			dirty_and_release_pages(NULL, root, file, pages,
						dirty_pages, pos, copied);
		}

		btrfs_drop_pages(pages, num_pages);

		if (copied > 0) {
			if (will_write) {
				filemap_fdatawrite_range(inode->i_mapping, pos,
							 pos + copied - 1);
			} else {
				balance_dirty_pages_ratelimited_nr(
							inode->i_mapping,
							dirty_pages);
				if (dirty_pages <
				    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
					btrfs_btree_balance_dirty(root, 1);
				btrfs_throttle(root);
			}
		}

		pos += copied;
		num_written += copied;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

	kfree(pages);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 0);
			if (IS_ERR(trans)) {
				num_written = PTR_ERR(trans);
				goto done;
			}
			mutex_lock(&inode->i_mutex);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			mutex_unlock(&inode->i_mutex);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else if (ret != BTRFS_NO_LOG_SYNC) {
				btrfs_commit_transaction(trans, root);
			} else {
				btrfs_end_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT && buffered) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
done:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	/* the VFS called filemap_fdatawrite for us */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok, we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

const struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};