fs/btrfs/inode.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/compat.h>
34 #include <linux/bit_spinlock.h>
35 #include <linux/xattr.h>
36 #include <linux/posix_acl.h>
37 #include <linux/falloc.h>
38 #include <linux/slab.h>
39 #include <linux/ratelimit.h>
40 #include <linux/mount.h>
41 #include <linux/btrfs.h>
42 #include <linux/blkdev.h>
43 #include <linux/posix_acl_xattr.h>
44 #include <linux/uio.h>
45 #include <asm/unaligned.h>
46 #include "ctree.h"
47 #include "disk-io.h"
48 #include "transaction.h"
49 #include "btrfs_inode.h"
50 #include "print-tree.h"
51 #include "ordered-data.h"
52 #include "xattr.h"
53 #include "tree-log.h"
54 #include "volumes.h"
55 #include "compression.h"
56 #include "locking.h"
57 #include "free-space-cache.h"
58 #include "inode-map.h"
59 #include "backref.h"
60 #include "hash.h"
61 #include "props.h"
62 #include "qgroup.h"
63 #include "dedupe.h"
64
65 struct btrfs_iget_args {
66 struct btrfs_key *location;
67 struct btrfs_root *root;
68 };
69
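/*
 * Scratch state for one direct I/O call: the outstanding extents and
 * reserved bytes the DIO write still holds, plus the range of ordered
 * extents that were created but whose bios have not been submitted yet,
 * so a failed DIO can be cleaned up via __endio_write_update_ordered().
 * (It is set up by btrfs_direct_IO() further down in this file.)
 */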
70 struct btrfs_dio_data {
71 u64 outstanding_extents;
72 u64 reserve;
73 u64 unsubmitted_oe_range_start;
74 u64 unsubmitted_oe_range_end;
75 int overwrite;
76 };
77
78 static const struct inode_operations btrfs_dir_inode_operations;
79 static const struct inode_operations btrfs_symlink_inode_operations;
80 static const struct inode_operations btrfs_dir_ro_inode_operations;
81 static const struct inode_operations btrfs_special_inode_operations;
82 static const struct inode_operations btrfs_file_inode_operations;
83 static const struct address_space_operations btrfs_aops;
84 static const struct address_space_operations btrfs_symlink_aops;
85 static const struct file_operations btrfs_dir_file_operations;
86 static const struct extent_io_ops btrfs_extent_io_ops;
87
88 static struct kmem_cache *btrfs_inode_cachep;
89 struct kmem_cache *btrfs_trans_handle_cachep;
90 struct kmem_cache *btrfs_path_cachep;
91 struct kmem_cache *btrfs_free_space_cachep;
92
93 #define S_SHIFT 12
94 static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
95 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
96 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
97 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
98 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
99 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
100 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
101 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
102 };
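/*
 * The table above maps (mode & S_IFMT) >> S_SHIFT to the BTRFS_FT_* type
 * recorded in directory items. For example, S_IFREG is 0100000 (octal),
 * so a regular file indexes slot 0100000 >> 12 == 8, which holds
 * BTRFS_FT_REG_FILE.
 */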
103
104 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
105 static int btrfs_truncate(struct inode *inode);
106 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
107 static noinline int cow_file_range(struct inode *inode,
108 struct page *locked_page,
109 u64 start, u64 end, u64 delalloc_end,
110 int *page_started, unsigned long *nr_written,
111 int unlock, struct btrfs_dedupe_hash *hash);
112 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
113 u64 orig_start, u64 block_start,
114 u64 block_len, u64 orig_block_len,
115 u64 ram_bytes, int compress_type,
116 int type);
117
118 static void __endio_write_update_ordered(struct inode *inode,
119 const u64 offset, const u64 bytes,
120 const bool uptodate);
121
122 /*
123 * Clean up all submitted ordered extents in the specified range to handle
124 * errors from the fill_delalloc() callback.
125 *
126 * NOTE: caller must ensure that when an error happens, it can not call
127 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
128 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
129 * to be released, which we want to happen only when finishing the ordered
130 * extent (btrfs_finish_ordered_io()). Also note that the caller of the
131 * fill_delalloc() callback already does proper cleanup for the first page of
132 * the range, that is, it invokes the callback writepage_end_io_hook() for the
133 * range of the first page.
134 */
135 static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
136 const u64 offset,
137 const u64 bytes)
138 {
139 unsigned long index = offset >> PAGE_SHIFT;
140 unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
141 struct page *page;
142
143 while (index <= end_index) {
144 page = find_get_page(inode->i_mapping, index);
145 index++;
146 if (!page)
147 continue;
148 ClearPagePrivate2(page);
149 put_page(page);
150 }
151 return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
152 bytes - PAGE_SIZE, false);
153 }
154
155 static int btrfs_dirty_inode(struct inode *inode);
156
157 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
158 void btrfs_test_inode_set_ops(struct inode *inode)
159 {
160 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
161 }
162 #endif
163
164 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
165 struct inode *inode, struct inode *dir,
166 const struct qstr *qstr)
167 {
168 int err;
169
170 err = btrfs_init_acl(trans, inode, dir);
171 if (!err)
172 err = btrfs_xattr_security_init(trans, inode, dir, qstr);
173 return err;
174 }
175
176 /*
177 * this does all the hard work for inserting an inline extent into
178 * the btree. The caller should have done a btrfs_drop_extents so that
179 * no overlapping inline items exist in the btree
180 */
181 static int insert_inline_extent(struct btrfs_trans_handle *trans,
182 struct btrfs_path *path, int extent_inserted,
183 struct btrfs_root *root, struct inode *inode,
184 u64 start, size_t size, size_t compressed_size,
185 int compress_type,
186 struct page **compressed_pages)
187 {
188 struct extent_buffer *leaf;
189 struct page *page = NULL;
190 char *kaddr;
191 unsigned long ptr;
192 struct btrfs_file_extent_item *ei;
193 int ret;
194 size_t cur_size = size;
195 unsigned long offset;
196
197 if (compressed_size && compressed_pages)
198 cur_size = compressed_size;
199
200 inode_add_bytes(inode, size);
201
202 if (!extent_inserted) {
203 struct btrfs_key key;
204 size_t datasize;
205
206 key.objectid = btrfs_ino(BTRFS_I(inode));
207 key.offset = start;
208 key.type = BTRFS_EXTENT_DATA_KEY;
209
210 datasize = btrfs_file_extent_calc_inline_size(cur_size);
211 path->leave_spinning = 1;
212 ret = btrfs_insert_empty_item(trans, root, path, &key,
213 datasize);
214 if (ret)
215 goto fail;
216 }
217 leaf = path->nodes[0];
218 ei = btrfs_item_ptr(leaf, path->slots[0],
219 struct btrfs_file_extent_item);
220 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
221 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
222 btrfs_set_file_extent_encryption(leaf, ei, 0);
223 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
224 btrfs_set_file_extent_ram_bytes(leaf, ei, size);
225 ptr = btrfs_file_extent_inline_start(ei);
226
227 if (compress_type != BTRFS_COMPRESS_NONE) {
228 struct page *cpage;
229 int i = 0;
230 while (compressed_size > 0) {
231 cpage = compressed_pages[i];
232 cur_size = min_t(unsigned long, compressed_size,
233 PAGE_SIZE);
234
235 kaddr = kmap_atomic(cpage);
236 write_extent_buffer(leaf, kaddr, ptr, cur_size);
237 kunmap_atomic(kaddr);
238
239 i++;
240 ptr += cur_size;
241 compressed_size -= cur_size;
242 }
243 btrfs_set_file_extent_compression(leaf, ei,
244 compress_type);
245 } else {
246 page = find_get_page(inode->i_mapping,
247 start >> PAGE_SHIFT);
248 btrfs_set_file_extent_compression(leaf, ei, 0);
249 kaddr = kmap_atomic(page);
250 offset = start & (PAGE_SIZE - 1);
251 write_extent_buffer(leaf, kaddr + offset, ptr, size);
252 kunmap_atomic(kaddr);
253 put_page(page);
254 }
255 btrfs_mark_buffer_dirty(leaf);
256 btrfs_release_path(path);
257
258 /*
259 * we're an inline extent, so nobody can
260 * extend the file past i_size without locking
261 * a page we already have locked.
262 *
263 * We must do any isize and inode updates
264 * before we unlock the pages. Otherwise we
265 * could end up racing with unlink.
266 */
267 BTRFS_I(inode)->disk_i_size = inode->i_size;
268 ret = btrfs_update_inode(trans, root, inode);
269
270 fail:
271 return ret;
272 }
273
274
275 /*
276 * conditionally insert an inline extent into the file. This
277 * does the checks required to make sure the data is small enough
278 * to fit as an inline extent.
279 */
280 static noinline int cow_file_range_inline(struct btrfs_root *root,
281 struct inode *inode, u64 start,
282 u64 end, size_t compressed_size,
283 int compress_type,
284 struct page **compressed_pages)
285 {
286 struct btrfs_fs_info *fs_info = root->fs_info;
287 struct btrfs_trans_handle *trans;
288 u64 isize = i_size_read(inode);
289 u64 actual_end = min(end + 1, isize);
290 u64 inline_len = actual_end - start;
291 u64 aligned_end = ALIGN(end, fs_info->sectorsize);
292 u64 data_len = inline_len;
293 int ret;
294 struct btrfs_path *path;
295 int extent_inserted = 0;
296 u32 extent_item_size;
297
298 if (compressed_size)
299 data_len = compressed_size;
300
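/*
 * The checks below reject anything that cannot live as an inline extent:
 * the data must start at file offset 0, end within the first sector, fit
 * in BTRFS_MAX_INLINE_DATA_SIZE() and the max_inline mount option, must
 * not be an uncompressed range that already ends on a sector boundary
 * (inlining it would save nothing), and must reach all the way to i_size.
 * Returning 1 tells the caller to fall back to a regular extent.
 */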
301 if (start > 0 ||
302 actual_end > fs_info->sectorsize ||
303 data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
304 (!compressed_size &&
305 (actual_end & (fs_info->sectorsize - 1)) == 0) ||
306 end + 1 < isize ||
307 data_len > fs_info->max_inline) {
308 return 1;
309 }
310
311 path = btrfs_alloc_path();
312 if (!path)
313 return -ENOMEM;
314
315 trans = btrfs_join_transaction(root);
316 if (IS_ERR(trans)) {
317 btrfs_free_path(path);
318 return PTR_ERR(trans);
319 }
320 trans->block_rsv = &fs_info->delalloc_block_rsv;
321
322 if (compressed_size && compressed_pages)
323 extent_item_size = btrfs_file_extent_calc_inline_size(
324 compressed_size);
325 else
326 extent_item_size = btrfs_file_extent_calc_inline_size(
327 inline_len);
328
329 ret = __btrfs_drop_extents(trans, root, inode, path,
330 start, aligned_end, NULL,
331 1, 1, extent_item_size, &extent_inserted);
332 if (ret) {
333 btrfs_abort_transaction(trans, ret);
334 goto out;
335 }
336
337 if (isize > actual_end)
338 inline_len = min_t(u64, isize, actual_end);
339 ret = insert_inline_extent(trans, path, extent_inserted,
340 root, inode, start,
341 inline_len, compressed_size,
342 compress_type, compressed_pages);
343 if (ret && ret != -ENOSPC) {
344 btrfs_abort_transaction(trans, ret);
345 goto out;
346 } else if (ret == -ENOSPC) {
347 ret = 1;
348 goto out;
349 }
350
351 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
352 btrfs_delalloc_release_metadata(BTRFS_I(inode), end + 1 - start);
353 btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
354 out:
355 /*
356 * Don't forget to free the reserved space, as for inlined extent
357 * it won't count as data extent, free them directly here.
358 * And at reserve time, it's always aligned to page size, so
359 * just free one page here.
360 */
361 btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
362 btrfs_free_path(path);
363 btrfs_end_transaction(trans);
364 return ret;
365 }
366
367 struct async_extent {
368 u64 start;
369 u64 ram_size;
370 u64 compressed_size;
371 struct page **pages;
372 unsigned long nr_pages;
373 int compress_type;
374 struct list_head list;
375 };
376
377 struct async_cow {
378 struct inode *inode;
379 struct btrfs_root *root;
380 struct page *locked_page;
381 u64 start;
382 u64 end;
383 struct list_head extents;
384 struct btrfs_work work;
385 };
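/*
 * One async_cow describes a chunk of a delalloc range handed off to the
 * compression workers; its extents list collects the resulting
 * async_extent entries. An async_extent with pages == NULL means
 * compression was skipped or failed and submit_compressed_extents() falls
 * back to plain cow_file_range() for that range.
 */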
386
387 static noinline int add_async_extent(struct async_cow *cow,
388 u64 start, u64 ram_size,
389 u64 compressed_size,
390 struct page **pages,
391 unsigned long nr_pages,
392 int compress_type)
393 {
394 struct async_extent *async_extent;
395
396 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
397 BUG_ON(!async_extent); /* -ENOMEM */
398 async_extent->start = start;
399 async_extent->ram_size = ram_size;
400 async_extent->compressed_size = compressed_size;
401 async_extent->pages = pages;
402 async_extent->nr_pages = nr_pages;
403 async_extent->compress_type = compress_type;
404 list_add_tail(&async_extent->list, &cow->extents);
405 return 0;
406 }
407
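/*
 * Decide whether a range should take the compression path. Roughly:
 * compress-force always compresses, a defrag request with a compression
 * type comes next, the per-inode NOCOMPRESS flag vetoes compression, and
 * otherwise the compress mount option, inode flag or compression property
 * only compress when the heuristic expects the data to shrink.
 */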
408 static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
409 {
410 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
411
412 /* force compress */
413 if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
414 return 1;
415 /* defrag ioctl */
416 if (BTRFS_I(inode)->defrag_compress)
417 return 1;
418 /* bad compression ratios */
419 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
420 return 0;
421 if (btrfs_test_opt(fs_info, COMPRESS) ||
422 BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
423 BTRFS_I(inode)->prop_compress)
424 return btrfs_compress_heuristic(inode, start, end);
425 return 0;
426 }
427
428 static inline void inode_should_defrag(struct btrfs_inode *inode,
429 u64 start, u64 end, u64 num_bytes, u64 small_write)
430 {
431 /* If this is a small write inside eof, kick off a defrag */
432 if (num_bytes < small_write &&
433 (start > 0 || end + 1 < inode->disk_i_size))
434 btrfs_add_inode_defrag(NULL, inode);
435 }
436
437 /*
438 * we create compressed extents in two phases. The first
439 * phase compresses a range of pages that have already been
440 * locked (both pages and state bits are locked).
441 *
442 * This is done inside an ordered work queue, and the compression
443 * is spread across many cpus. The actual IO submission is step
444 * two, and the ordered work queue takes care of making sure that
445 * happens in the same order things were put onto the queue by
446 * writepages and friends.
447 *
448 * If this code finds it can't get good compression, it puts an
449 * entry onto the work queue to write the uncompressed bytes. This
450 * makes sure that both compressed inodes and uncompressed inodes
451 * are written in the same order that the flusher thread sent them
452 * down.
453 */
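/*
 * Phase one is driven by async_cow_start() calling compress_file_range()
 * below; phase two is submit_compressed_extents(), run from
 * async_cow_submit() once the work item reaches the front of the ordered
 * delalloc workqueue (see cow_file_range_async()).
 */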
454 static noinline void compress_file_range(struct inode *inode,
455 struct page *locked_page,
456 u64 start, u64 end,
457 struct async_cow *async_cow,
458 int *num_added)
459 {
460 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
461 struct btrfs_root *root = BTRFS_I(inode)->root;
462 u64 num_bytes;
463 u64 blocksize = fs_info->sectorsize;
464 u64 actual_end;
465 u64 isize = i_size_read(inode);
466 int ret = 0;
467 struct page **pages = NULL;
468 unsigned long nr_pages;
469 unsigned long total_compressed = 0;
470 unsigned long total_in = 0;
471 int i;
472 int will_compress;
473 int compress_type = fs_info->compress_type;
474 int redirty = 0;
475
476 inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
477 SZ_16K);
478
479 actual_end = min_t(u64, isize, end + 1);
480 again:
481 will_compress = 0;
482 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
483 BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
484 nr_pages = min_t(unsigned long, nr_pages,
485 BTRFS_MAX_COMPRESSED / PAGE_SIZE);
486
487 /*
488 * we don't want to send crud past the end of i_size through
489 * compression, that's just a waste of CPU time. So, if the
490 * end of the file is before the start of our current
491 * requested range of bytes, we bail out to the uncompressed
492 * cleanup code that can deal with all of this.
493 *
494 * It isn't really the fastest way to fix things, but this is a
495 * very uncommon corner.
496 */
497 if (actual_end <= start)
498 goto cleanup_and_bail_uncompressed;
499
500 total_compressed = actual_end - start;
501
502 /*
503 * skip compression for a small file range (<= blocksize) that
504 * isn't an inline extent, since it doesn't save disk space at all.
505 */
506 if (total_compressed <= blocksize &&
507 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
508 goto cleanup_and_bail_uncompressed;
509
510 total_compressed = min_t(unsigned long, total_compressed,
511 BTRFS_MAX_UNCOMPRESSED);
512 num_bytes = ALIGN(end - start + 1, blocksize);
513 num_bytes = max(blocksize, num_bytes);
514 total_in = 0;
515 ret = 0;
516
517 /*
518 * we do compression for mount -o compress and when the
519 * inode has not been flagged as nocompress. This flag can
520 * change at any time if we discover bad compression ratios.
521 */
522 if (inode_need_compress(inode, start, end)) {
523 WARN_ON(pages);
524 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
525 if (!pages) {
526 /* just bail out to the uncompressed code */
527 nr_pages = 0;
528 goto cont;
529 }
530
531 if (BTRFS_I(inode)->defrag_compress)
532 compress_type = BTRFS_I(inode)->defrag_compress;
533 else if (BTRFS_I(inode)->prop_compress)
534 compress_type = BTRFS_I(inode)->prop_compress;
535
536 /*
537 * we need to call clear_page_dirty_for_io on each
538 * page in the range. Otherwise applications with the file
539 * mmap'd can wander in and change the page contents while
540 * we are compressing them.
541 *
542 * If the compression fails for any reason, we set the pages
543 * dirty again later on.
544 */
545 extent_range_clear_dirty_for_io(inode, start, end);
546 redirty = 1;
547 ret = btrfs_compress_pages(compress_type,
548 inode->i_mapping, start,
549 pages,
550 &nr_pages,
551 &total_in,
552 &total_compressed);
553
554 if (!ret) {
555 unsigned long offset = total_compressed &
556 (PAGE_SIZE - 1);
557 struct page *page = pages[nr_pages - 1];
558 char *kaddr;
559
560 /* zero the tail end of the last page, we might be
561 * sending it down to disk
562 */
563 if (offset) {
564 kaddr = kmap_atomic(page);
565 memset(kaddr + offset, 0,
566 PAGE_SIZE - offset);
567 kunmap_atomic(kaddr);
568 }
569 will_compress = 1;
570 }
571 }
572 cont:
573 if (start == 0) {
574 /* let's try to make an inline extent */
575 if (ret || total_in < (actual_end - start)) {
576 /* we didn't compress the entire range, try
577 * to make an uncompressed inline extent.
578 */
579 ret = cow_file_range_inline(root, inode, start, end,
580 0, BTRFS_COMPRESS_NONE, NULL);
581 } else {
582 /* try making a compressed inline extent */
583 ret = cow_file_range_inline(root, inode, start, end,
584 total_compressed,
585 compress_type, pages);
586 }
587 if (ret <= 0) {
588 unsigned long clear_flags = EXTENT_DELALLOC |
589 EXTENT_DELALLOC_NEW | EXTENT_DEFRAG;
590 unsigned long page_error_op;
591
592 clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
593 page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
594
595 /*
596 * inline extent creation worked or returned error,
597 * we don't need to create any more async work items.
598 * Unlock and free up our temp pages.
599 */
600 extent_clear_unlock_delalloc(inode, start, end, end,
601 NULL, clear_flags,
602 PAGE_UNLOCK |
603 PAGE_CLEAR_DIRTY |
604 PAGE_SET_WRITEBACK |
605 page_error_op |
606 PAGE_END_WRITEBACK);
607 if (ret == 0)
608 btrfs_free_reserved_data_space_noquota(inode,
609 start,
610 end - start + 1);
611 goto free_pages_out;
612 }
613 }
614
615 if (will_compress) {
616 /*
617 * we aren't doing an inline extent; round the compressed size
618 * up to a block size boundary so the allocator does sane
619 * things
620 */
621 total_compressed = ALIGN(total_compressed, blocksize);
622
623 /*
624 * one last check to make sure the compression is really a
625 * win, compare the page count read with the blocks on disk,
626 * compression must free at least one sector size
627 */
628 total_in = ALIGN(total_in, PAGE_SIZE);
629 if (total_compressed + blocksize <= total_in) {
630 num_bytes = total_in;
631 *num_added += 1;
632
633 /*
634 * The async work queues will take care of doing actual
635 * allocation on disk for these compressed pages, and
636 * will submit them to the elevator.
637 */
638 add_async_extent(async_cow, start, num_bytes,
639 total_compressed, pages, nr_pages,
640 compress_type);
641
642 if (start + num_bytes < end) {
643 start += num_bytes;
644 pages = NULL;
645 cond_resched();
646 goto again;
647 }
648 return;
649 }
650 }
651 if (pages) {
652 /*
653 * the compression code ran but failed to make things smaller,
654 * free any pages it allocated and our page pointer array
655 */
656 for (i = 0; i < nr_pages; i++) {
657 WARN_ON(pages[i]->mapping);
658 put_page(pages[i]);
659 }
660 kfree(pages);
661 pages = NULL;
662 total_compressed = 0;
663 nr_pages = 0;
664
665 /* flag the file so we don't compress in the future */
666 if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
667 !(BTRFS_I(inode)->prop_compress)) {
668 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
669 }
670 }
671 cleanup_and_bail_uncompressed:
672 /*
673 * No compression, but we still need to write the pages in the file
674 * we've been given so far. redirty the locked page if it corresponds
675 * to our extent and set things up for the async work queue to run
676 * cow_file_range to do the normal delalloc dance.
677 */
678 if (page_offset(locked_page) >= start &&
679 page_offset(locked_page) <= end)
680 __set_page_dirty_nobuffers(locked_page);
681 /* unlocked later on in the async handlers */
682
683 if (redirty)
684 extent_range_redirty_for_io(inode, start, end);
685 add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
686 BTRFS_COMPRESS_NONE);
687 *num_added += 1;
688
689 return;
690
691 free_pages_out:
692 for (i = 0; i < nr_pages; i++) {
693 WARN_ON(pages[i]->mapping);
694 put_page(pages[i]);
695 }
696 kfree(pages);
697 }
698
699 static void free_async_extent_pages(struct async_extent *async_extent)
700 {
701 int i;
702
703 if (!async_extent->pages)
704 return;
705
706 for (i = 0; i < async_extent->nr_pages; i++) {
707 WARN_ON(async_extent->pages[i]->mapping);
708 put_page(async_extent->pages[i]);
709 }
710 kfree(async_extent->pages);
711 async_extent->nr_pages = 0;
712 async_extent->pages = NULL;
713 }
714
715 /*
716 * phase two of compressed writeback. This is the ordered portion
717 * of the code, which only gets called in the order the work was
718 * queued. We walk all the async extents created by compress_file_range
719 * and send them down to the disk.
720 */
721 static noinline void submit_compressed_extents(struct inode *inode,
722 struct async_cow *async_cow)
723 {
724 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
725 struct async_extent *async_extent;
726 u64 alloc_hint = 0;
727 struct btrfs_key ins;
728 struct extent_map *em;
729 struct btrfs_root *root = BTRFS_I(inode)->root;
730 struct extent_io_tree *io_tree;
731 int ret = 0;
732
733 again:
734 while (!list_empty(&async_cow->extents)) {
735 async_extent = list_entry(async_cow->extents.next,
736 struct async_extent, list);
737 list_del(&async_extent->list);
738
739 io_tree = &BTRFS_I(inode)->io_tree;
740
741 retry:
742 /* did the compression code fall back to uncompressed IO? */
743 if (!async_extent->pages) {
744 int page_started = 0;
745 unsigned long nr_written = 0;
746
747 lock_extent(io_tree, async_extent->start,
748 async_extent->start +
749 async_extent->ram_size - 1);
750
751 /* allocate blocks */
752 ret = cow_file_range(inode, async_cow->locked_page,
753 async_extent->start,
754 async_extent->start +
755 async_extent->ram_size - 1,
756 async_extent->start +
757 async_extent->ram_size - 1,
758 &page_started, &nr_written, 0,
759 NULL);
760
761 /* JDM XXX */
762
763 /*
764 * if page_started, cow_file_range inserted an
765 * inline extent and took care of all the unlocking
766 * and IO for us. Otherwise, we need to submit
767 * all those pages down to the drive.
768 */
769 if (!page_started && !ret)
770 extent_write_locked_range(io_tree,
771 inode, async_extent->start,
772 async_extent->start +
773 async_extent->ram_size - 1,
774 btrfs_get_extent,
775 WB_SYNC_ALL);
776 else if (ret)
777 unlock_page(async_cow->locked_page);
778 kfree(async_extent);
779 cond_resched();
780 continue;
781 }
782
783 lock_extent(io_tree, async_extent->start,
784 async_extent->start + async_extent->ram_size - 1);
785
786 ret = btrfs_reserve_extent(root, async_extent->ram_size,
787 async_extent->compressed_size,
788 async_extent->compressed_size,
789 0, alloc_hint, &ins, 1, 1);
790 if (ret) {
791 free_async_extent_pages(async_extent);
792
793 if (ret == -ENOSPC) {
794 unlock_extent(io_tree, async_extent->start,
795 async_extent->start +
796 async_extent->ram_size - 1);
797
798 /*
799 * we need to redirty the pages if we decide to
800 * fall back to uncompressed IO, otherwise we
801 * will not submit these pages down to lower
802 * layers.
803 */
804 extent_range_redirty_for_io(inode,
805 async_extent->start,
806 async_extent->start +
807 async_extent->ram_size - 1);
808
809 goto retry;
810 }
811 goto out_free;
812 }
813 /*
814 * here we're doing allocation and writeback of the
815 * compressed pages
816 */
817 em = create_io_em(inode, async_extent->start,
818 async_extent->ram_size, /* len */
819 async_extent->start, /* orig_start */
820 ins.objectid, /* block_start */
821 ins.offset, /* block_len */
822 ins.offset, /* orig_block_len */
823 async_extent->ram_size, /* ram_bytes */
824 async_extent->compress_type,
825 BTRFS_ORDERED_COMPRESSED);
826 if (IS_ERR(em))
827 /* ret value is not needed since this function returns void */
828 goto out_free_reserve;
829 free_extent_map(em);
830
831 ret = btrfs_add_ordered_extent_compress(inode,
832 async_extent->start,
833 ins.objectid,
834 async_extent->ram_size,
835 ins.offset,
836 BTRFS_ORDERED_COMPRESSED,
837 async_extent->compress_type);
838 if (ret) {
839 btrfs_drop_extent_cache(BTRFS_I(inode),
840 async_extent->start,
841 async_extent->start +
842 async_extent->ram_size - 1, 0);
843 goto out_free_reserve;
844 }
845 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
846
847 /*
848 * clear dirty, set writeback and unlock the pages.
849 */
850 extent_clear_unlock_delalloc(inode, async_extent->start,
851 async_extent->start +
852 async_extent->ram_size - 1,
853 async_extent->start +
854 async_extent->ram_size - 1,
855 NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
856 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
857 PAGE_SET_WRITEBACK);
858 if (btrfs_submit_compressed_write(inode,
859 async_extent->start,
860 async_extent->ram_size,
861 ins.objectid,
862 ins.offset, async_extent->pages,
863 async_extent->nr_pages)) {
864 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
865 struct page *p = async_extent->pages[0];
866 const u64 start = async_extent->start;
867 const u64 end = start + async_extent->ram_size - 1;
868
869 p->mapping = inode->i_mapping;
870 tree->ops->writepage_end_io_hook(p, start, end,
871 NULL, 0);
872 p->mapping = NULL;
873 extent_clear_unlock_delalloc(inode, start, end, end,
874 NULL, 0,
875 PAGE_END_WRITEBACK |
876 PAGE_SET_ERROR);
877 free_async_extent_pages(async_extent);
878 }
879 alloc_hint = ins.objectid + ins.offset;
880 kfree(async_extent);
881 cond_resched();
882 }
883 return;
884 out_free_reserve:
885 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
886 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
887 out_free:
888 extent_clear_unlock_delalloc(inode, async_extent->start,
889 async_extent->start +
890 async_extent->ram_size - 1,
891 async_extent->start +
892 async_extent->ram_size - 1,
893 NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
894 EXTENT_DELALLOC_NEW |
895 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
896 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
897 PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
898 PAGE_SET_ERROR);
899 free_async_extent_pages(async_extent);
900 kfree(async_extent);
901 goto again;
902 }
903
904 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
905 u64 num_bytes)
906 {
907 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
908 struct extent_map *em;
909 u64 alloc_hint = 0;
910
911 read_lock(&em_tree->lock);
912 em = search_extent_mapping(em_tree, start, num_bytes);
913 if (em) {
914 /*
915 * if block start isn't an actual block number then find the
916 * first block in this inode and use that as a hint. If that
917 * block is also bogus then just don't worry about it.
918 */
919 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
920 free_extent_map(em);
921 em = search_extent_mapping(em_tree, 0, 0);
922 if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
923 alloc_hint = em->block_start;
924 if (em)
925 free_extent_map(em);
926 } else {
927 alloc_hint = em->block_start;
928 free_extent_map(em);
929 }
930 }
931 read_unlock(&em_tree->lock);
932
933 return alloc_hint;
934 }
935
936 /*
937 * when extent_io.c finds a delayed allocation range in the file,
938 * the callbacks end up in this code. The basic idea is to
939 * allocate extents on disk for the range, and create ordered data structs
940 * in ram to track those extents.
941 *
942 * locked_page is the page that writepage had locked already. We use
943 * it to make sure we don't do extra locks or unlocks.
944 *
945 * *page_started is set to one if we unlock locked_page and do everything
946 * required to start IO on it. It may be clean and already done with
947 * IO when we return.
948 */
949 static noinline int cow_file_range(struct inode *inode,
950 struct page *locked_page,
951 u64 start, u64 end, u64 delalloc_end,
952 int *page_started, unsigned long *nr_written,
953 int unlock, struct btrfs_dedupe_hash *hash)
954 {
955 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
956 struct btrfs_root *root = BTRFS_I(inode)->root;
957 u64 alloc_hint = 0;
958 u64 num_bytes;
959 unsigned long ram_size;
960 u64 disk_num_bytes;
961 u64 cur_alloc_size = 0;
962 u64 blocksize = fs_info->sectorsize;
963 struct btrfs_key ins;
964 struct extent_map *em;
965 unsigned clear_bits;
966 unsigned long page_ops;
967 bool extent_reserved = false;
968 int ret = 0;
969
970 if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
971 WARN_ON_ONCE(1);
972 ret = -EINVAL;
973 goto out_unlock;
974 }
975
976 num_bytes = ALIGN(end - start + 1, blocksize);
977 num_bytes = max(blocksize, num_bytes);
978 disk_num_bytes = num_bytes;
979
980 inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K);
981
982 if (start == 0) {
983 /* let's try to make an inline extent */
984 ret = cow_file_range_inline(root, inode, start, end, 0,
985 BTRFS_COMPRESS_NONE, NULL);
986 if (ret == 0) {
987 extent_clear_unlock_delalloc(inode, start, end,
988 delalloc_end, NULL,
989 EXTENT_LOCKED | EXTENT_DELALLOC |
990 EXTENT_DELALLOC_NEW |
991 EXTENT_DEFRAG, PAGE_UNLOCK |
992 PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
993 PAGE_END_WRITEBACK);
994 btrfs_free_reserved_data_space_noquota(inode, start,
995 end - start + 1);
996 *nr_written = *nr_written +
997 (end - start + PAGE_SIZE) / PAGE_SIZE;
998 *page_started = 1;
999 goto out;
1000 } else if (ret < 0) {
1001 goto out_unlock;
1002 }
1003 }
1004
1005 BUG_ON(disk_num_bytes >
1006 btrfs_super_total_bytes(fs_info->super_copy));
1007
1008 alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
1009 btrfs_drop_extent_cache(BTRFS_I(inode), start,
1010 start + num_bytes - 1, 0);
1011
1012 while (disk_num_bytes > 0) {
1013 cur_alloc_size = disk_num_bytes;
1014 ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1015 fs_info->sectorsize, 0, alloc_hint,
1016 &ins, 1, 1);
1017 if (ret < 0)
1018 goto out_unlock;
1019 cur_alloc_size = ins.offset;
1020 extent_reserved = true;
1021
1022 ram_size = ins.offset;
1023 em = create_io_em(inode, start, ins.offset, /* len */
1024 start, /* orig_start */
1025 ins.objectid, /* block_start */
1026 ins.offset, /* block_len */
1027 ins.offset, /* orig_block_len */
1028 ram_size, /* ram_bytes */
1029 BTRFS_COMPRESS_NONE, /* compress_type */
1030 BTRFS_ORDERED_REGULAR /* type */);
1031 if (IS_ERR(em)) {
1032 ret = PTR_ERR(em);
1033 goto out_reserve;
1034 }
1035 free_extent_map(em);
1036
1037 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
1038 ram_size, cur_alloc_size, 0);
1039 if (ret)
1040 goto out_drop_extent_cache;
1041
1042 if (root->root_key.objectid ==
1043 BTRFS_DATA_RELOC_TREE_OBJECTID) {
1044 ret = btrfs_reloc_clone_csums(inode, start,
1045 cur_alloc_size);
1046 /*
1047 * Only drop cache here, and process as normal.
1048 *
1049 * We must not allow extent_clear_unlock_delalloc()
1050 * at out_unlock label to free meta of this ordered
1051 * extent, as its meta should be freed by
1052 * btrfs_finish_ordered_io().
1053 *
1054 * So we must continue until @start is increased to
1055 * skip current ordered extent.
1056 */
1057 if (ret)
1058 btrfs_drop_extent_cache(BTRFS_I(inode), start,
1059 start + ram_size - 1, 0);
1060 }
1061
1062 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1063
1064 /* we're not doing compressed IO, don't unlock the first
1065 * page (which the caller expects to stay locked), don't
1066 * clear any dirty bits and don't set any writeback bits
1067 *
1068 * Do set the Private2 bit so we know this page was properly
1069 * setup for writepage
1070 */
1071 page_ops = unlock ? PAGE_UNLOCK : 0;
1072 page_ops |= PAGE_SET_PRIVATE2;
1073
1074 extent_clear_unlock_delalloc(inode, start,
1075 start + ram_size - 1,
1076 delalloc_end, locked_page,
1077 EXTENT_LOCKED | EXTENT_DELALLOC,
1078 page_ops);
1079 if (disk_num_bytes < cur_alloc_size)
1080 disk_num_bytes = 0;
1081 else
1082 disk_num_bytes -= cur_alloc_size;
1083 num_bytes -= cur_alloc_size;
1084 alloc_hint = ins.objectid + ins.offset;
1085 start += cur_alloc_size;
1086 extent_reserved = false;
1087
1088 /*
1089 * btrfs_reloc_clone_csums() error, since start is increased
1090 * extent_clear_unlock_delalloc() at out_unlock label won't
1091 * free metadata of current ordered extent, we're OK to exit.
1092 */
1093 if (ret)
1094 goto out_unlock;
1095 }
1096 out:
1097 return ret;
1098
1099 out_drop_extent_cache:
1100 btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
1101 out_reserve:
1102 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1103 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1104 out_unlock:
1105 clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1106 EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1107 page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
1108 PAGE_END_WRITEBACK;
1109 /*
1110 * If we reserved an extent for our delalloc range (or a subrange) and
1111 * failed to create the respective ordered extent, then it means that
1112 * when we reserved the extent we decremented the extent's size from
1113 * the data space_info's bytes_may_use counter and incremented the
1114 * space_info's bytes_reserved counter by the same amount. We must make
1115 * sure extent_clear_unlock_delalloc() does not try to decrement again
1116 * the data space_info's bytes_may_use counter, therefore we do not pass
1117 * it the flag EXTENT_CLEAR_DATA_RESV.
1118 */
1119 if (extent_reserved) {
1120 extent_clear_unlock_delalloc(inode, start,
1121 start + cur_alloc_size,
1122 start + cur_alloc_size,
1123 locked_page,
1124 clear_bits,
1125 page_ops);
1126 start += cur_alloc_size;
1127 if (start >= end)
1128 goto out;
1129 }
1130 extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
1131 locked_page,
1132 clear_bits | EXTENT_CLEAR_DATA_RESV,
1133 page_ops);
1134 goto out;
1135 }
1136
1137 /*
1138 * work queue callback to start compression on a file and pages
1139 */
1140 static noinline void async_cow_start(struct btrfs_work *work)
1141 {
1142 struct async_cow *async_cow;
1143 int num_added = 0;
1144 async_cow = container_of(work, struct async_cow, work);
1145
1146 compress_file_range(async_cow->inode, async_cow->locked_page,
1147 async_cow->start, async_cow->end, async_cow,
1148 &num_added);
1149 if (num_added == 0) {
1150 btrfs_add_delayed_iput(async_cow->inode);
1151 async_cow->inode = NULL;
1152 }
1153 }
1154
1155 /*
1156 * work queue callback to submit previously compressed pages
1157 */
1158 static noinline void async_cow_submit(struct btrfs_work *work)
1159 {
1160 struct btrfs_fs_info *fs_info;
1161 struct async_cow *async_cow;
1162 struct btrfs_root *root;
1163 unsigned long nr_pages;
1164
1165 async_cow = container_of(work, struct async_cow, work);
1166
1167 root = async_cow->root;
1168 fs_info = root->fs_info;
1169 nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
1170 PAGE_SHIFT;
1171
1172 /*
1173 * atomic_sub_return implies a barrier for waitqueue_active
1174 */
1175 if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1176 5 * SZ_1M &&
1177 waitqueue_active(&fs_info->async_submit_wait))
1178 wake_up(&fs_info->async_submit_wait);
1179
1180 if (async_cow->inode)
1181 submit_compressed_extents(async_cow->inode, async_cow);
1182 }
1183
1184 static noinline void async_cow_free(struct btrfs_work *work)
1185 {
1186 struct async_cow *async_cow;
1187 async_cow = container_of(work, struct async_cow, work);
1188 if (async_cow->inode)
1189 btrfs_add_delayed_iput(async_cow->inode);
1190 kfree(async_cow);
1191 }
1192
1193 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1194 u64 start, u64 end, int *page_started,
1195 unsigned long *nr_written)
1196 {
1197 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1198 struct async_cow *async_cow;
1199 struct btrfs_root *root = BTRFS_I(inode)->root;
1200 unsigned long nr_pages;
1201 u64 cur_end;
1202
1203 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1204 1, 0, NULL, GFP_NOFS);
1205 while (start < end) {
1206 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
1207 BUG_ON(!async_cow); /* -ENOMEM */
1208 async_cow->inode = igrab(inode);
1209 async_cow->root = root;
1210 async_cow->locked_page = locked_page;
1211 async_cow->start = start;
1212
1213 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
1214 !btrfs_test_opt(fs_info, FORCE_COMPRESS))
1215 cur_end = end;
1216 else
1217 cur_end = min(end, start + SZ_512K - 1);
1218
1219 async_cow->end = cur_end;
1220 INIT_LIST_HEAD(&async_cow->extents);
1221
1222 btrfs_init_work(&async_cow->work,
1223 btrfs_delalloc_helper,
1224 async_cow_start, async_cow_submit,
1225 async_cow_free);
1226
1227 nr_pages = (cur_end - start + PAGE_SIZE) >>
1228 PAGE_SHIFT;
1229 atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1230
1231 btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);
1232
1233 while (atomic_read(&fs_info->async_submit_draining) &&
1234 atomic_read(&fs_info->async_delalloc_pages)) {
1235 wait_event(fs_info->async_submit_wait,
1236 (atomic_read(&fs_info->async_delalloc_pages) ==
1237 0));
1238 }
1239
1240 *nr_written += nr_pages;
1241 start = cur_end + 1;
1242 }
1243 *page_started = 1;
1244 return 0;
1245 }
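/*
 * Each work item queued above covers at most SZ_512K of the delalloc
 * range, so a large write is split into many items and the compression
 * work is spread across CPUs. Only when the inode is flagged NOCOMPRESS
 * (and compress-force is off) is the whole range queued as a single item.
 */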
1246
1247 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1248 u64 bytenr, u64 num_bytes)
1249 {
1250 int ret;
1251 struct btrfs_ordered_sum *sums;
1252 LIST_HEAD(list);
1253
1254 ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
1255 bytenr + num_bytes - 1, &list, 0);
1256 if (ret == 0 && list_empty(&list))
1257 return 0;
1258
1259 while (!list_empty(&list)) {
1260 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1261 list_del(&sums->list);
1262 kfree(sums);
1263 }
1264 if (ret < 0)
1265 return ret;
1266 return 1;
1267 }
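/*
 * csum_exist_in_range() returns 0 when no checksums cover the range, 1
 * when at least one does, and a negative errno when the lookup itself
 * failed; any sums that were found are freed before returning.
 */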
1268
1269 /*
1270 * when the nocow writeback callback runs. This checks for snapshots or COW copies
1271 * of the extents that exist in the file, and COWs the file as required.
1272 *
1273 * If no cow copies or snapshots exist, we write directly to the existing
1274 * blocks on disk
1275 */
1276 static noinline int run_delalloc_nocow(struct inode *inode,
1277 struct page *locked_page,
1278 u64 start, u64 end, int *page_started, int force,
1279 unsigned long *nr_written)
1280 {
1281 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1282 struct btrfs_root *root = BTRFS_I(inode)->root;
1283 struct extent_buffer *leaf;
1284 struct btrfs_path *path;
1285 struct btrfs_file_extent_item *fi;
1286 struct btrfs_key found_key;
1287 struct extent_map *em;
1288 u64 cow_start;
1289 u64 cur_offset;
1290 u64 extent_end;
1291 u64 extent_offset;
1292 u64 disk_bytenr;
1293 u64 num_bytes;
1294 u64 disk_num_bytes;
1295 u64 ram_bytes;
1296 int extent_type;
1297 int ret, err;
1298 int type;
1299 int nocow;
1300 int check_prev = 1;
1301 bool nolock;
1302 u64 ino = btrfs_ino(BTRFS_I(inode));
1303
1304 path = btrfs_alloc_path();
1305 if (!path) {
1306 extent_clear_unlock_delalloc(inode, start, end, end,
1307 locked_page,
1308 EXTENT_LOCKED | EXTENT_DELALLOC |
1309 EXTENT_DO_ACCOUNTING |
1310 EXTENT_DEFRAG, PAGE_UNLOCK |
1311 PAGE_CLEAR_DIRTY |
1312 PAGE_SET_WRITEBACK |
1313 PAGE_END_WRITEBACK);
1314 return -ENOMEM;
1315 }
1316
1317 nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
1318
1319 cow_start = (u64)-1;
1320 cur_offset = start;
1321 while (1) {
1322 ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1323 cur_offset, 0);
1324 if (ret < 0)
1325 goto error;
1326 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1327 leaf = path->nodes[0];
1328 btrfs_item_key_to_cpu(leaf, &found_key,
1329 path->slots[0] - 1);
1330 if (found_key.objectid == ino &&
1331 found_key.type == BTRFS_EXTENT_DATA_KEY)
1332 path->slots[0]--;
1333 }
1334 check_prev = 0;
1335 next_slot:
1336 leaf = path->nodes[0];
1337 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1338 ret = btrfs_next_leaf(root, path);
1339 if (ret < 0) {
1340 if (cow_start != (u64)-1)
1341 cur_offset = cow_start;
1342 goto error;
1343 }
1344 if (ret > 0)
1345 break;
1346 leaf = path->nodes[0];
1347 }
1348
1349 nocow = 0;
1350 disk_bytenr = 0;
1351 num_bytes = 0;
1352 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1353
1354 if (found_key.objectid > ino)
1355 break;
1356 if (WARN_ON_ONCE(found_key.objectid < ino) ||
1357 found_key.type < BTRFS_EXTENT_DATA_KEY) {
1358 path->slots[0]++;
1359 goto next_slot;
1360 }
1361 if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1362 found_key.offset > end)
1363 break;
1364
1365 if (found_key.offset > cur_offset) {
1366 extent_end = found_key.offset;
1367 extent_type = 0;
1368 goto out_check;
1369 }
1370
1371 fi = btrfs_item_ptr(leaf, path->slots[0],
1372 struct btrfs_file_extent_item);
1373 extent_type = btrfs_file_extent_type(leaf, fi);
1374
1375 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1376 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1377 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1378 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1379 extent_offset = btrfs_file_extent_offset(leaf, fi);
1380 extent_end = found_key.offset +
1381 btrfs_file_extent_num_bytes(leaf, fi);
1382 disk_num_bytes =
1383 btrfs_file_extent_disk_num_bytes(leaf, fi);
1384 if (extent_end <= start) {
1385 path->slots[0]++;
1386 goto next_slot;
1387 }
1388 if (disk_bytenr == 0)
1389 goto out_check;
1390 if (btrfs_file_extent_compression(leaf, fi) ||
1391 btrfs_file_extent_encryption(leaf, fi) ||
1392 btrfs_file_extent_other_encoding(leaf, fi))
1393 goto out_check;
1394 if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1395 goto out_check;
1396 if (btrfs_extent_readonly(fs_info, disk_bytenr))
1397 goto out_check;
1398 ret = btrfs_cross_ref_exist(root, ino,
1399 found_key.offset -
1400 extent_offset, disk_bytenr);
1401 if (ret) {
1402 /*
1403 * ret could be -EIO if the above fails to read
1404 * metadata.
1405 */
1406 if (ret < 0) {
1407 if (cow_start != (u64)-1)
1408 cur_offset = cow_start;
1409 goto error;
1410 }
1411
1412 WARN_ON_ONCE(nolock);
1413 goto out_check;
1414 }
1415 disk_bytenr += extent_offset;
1416 disk_bytenr += cur_offset - found_key.offset;
1417 num_bytes = min(end + 1, extent_end) - cur_offset;
1418 /*
1419 * if there are pending snapshots for this root,
1420 * we fall back to the common COW path.
1421 */
1422 if (!nolock) {
1423 err = btrfs_start_write_no_snapshotting(root);
1424 if (!err)
1425 goto out_check;
1426 }
1427 /*
1428 * force COW if csums exist in the range.
1429 * this ensures that csums for a given extent are
1430 * either valid or do not exist.
1431 */
1432 ret = csum_exist_in_range(fs_info, disk_bytenr,
1433 num_bytes);
1434 if (ret) {
1435 if (!nolock)
1436 btrfs_end_write_no_snapshotting(root);
1437
1438 /*
1439 * ret could be -EIO if the above fails to read
1440 * metadata.
1441 */
1442 if (ret < 0) {
1443 if (cow_start != (u64)-1)
1444 cur_offset = cow_start;
1445 goto error;
1446 }
1447 WARN_ON_ONCE(nolock);
1448 goto out_check;
1449 }
1450 if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {
1451 if (!nolock)
1452 btrfs_end_write_no_snapshotting(root);
1453 goto out_check;
1454 }
1455 nocow = 1;
1456 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1457 extent_end = found_key.offset +
1458 btrfs_file_extent_inline_len(leaf,
1459 path->slots[0], fi);
1460 extent_end = ALIGN(extent_end,
1461 fs_info->sectorsize);
1462 } else {
1463 BUG_ON(1);
1464 }
1465 out_check:
1466 if (extent_end <= start) {
1467 path->slots[0]++;
1468 if (!nolock && nocow)
1469 btrfs_end_write_no_snapshotting(root);
1470 if (nocow)
1471 btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1472 goto next_slot;
1473 }
1474 if (!nocow) {
1475 if (cow_start == (u64)-1)
1476 cow_start = cur_offset;
1477 cur_offset = extent_end;
1478 if (cur_offset > end)
1479 break;
1480 path->slots[0]++;
1481 goto next_slot;
1482 }
1483
1484 btrfs_release_path(path);
1485 if (cow_start != (u64)-1) {
1486 ret = cow_file_range(inode, locked_page,
1487 cow_start, found_key.offset - 1,
1488 end, page_started, nr_written, 1,
1489 NULL);
1490 if (ret) {
1491 if (!nolock && nocow)
1492 btrfs_end_write_no_snapshotting(root);
1493 if (nocow)
1494 btrfs_dec_nocow_writers(fs_info,
1495 disk_bytenr);
1496 goto error;
1497 }
1498 cow_start = (u64)-1;
1499 }
1500
1501 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1502 u64 orig_start = found_key.offset - extent_offset;
1503
1504 em = create_io_em(inode, cur_offset, num_bytes,
1505 orig_start,
1506 disk_bytenr, /* block_start */
1507 num_bytes, /* block_len */
1508 disk_num_bytes, /* orig_block_len */
1509 ram_bytes, BTRFS_COMPRESS_NONE,
1510 BTRFS_ORDERED_PREALLOC);
1511 if (IS_ERR(em)) {
1512 if (!nolock && nocow)
1513 btrfs_end_write_no_snapshotting(root);
1514 if (nocow)
1515 btrfs_dec_nocow_writers(fs_info,
1516 disk_bytenr);
1517 ret = PTR_ERR(em);
1518 goto error;
1519 }
1520 free_extent_map(em);
1521 }
1522
1523 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1524 type = BTRFS_ORDERED_PREALLOC;
1525 } else {
1526 type = BTRFS_ORDERED_NOCOW;
1527 }
1528
1529 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1530 num_bytes, num_bytes, type);
1531 if (nocow)
1532 btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1533 BUG_ON(ret); /* -ENOMEM */
1534
1535 if (root->root_key.objectid ==
1536 BTRFS_DATA_RELOC_TREE_OBJECTID)
1537 /*
1538 * Error handled later, as we must prevent
1539 * extent_clear_unlock_delalloc() in error handler
1540 * from freeing metadata of created ordered extent.
1541 */
1542 ret = btrfs_reloc_clone_csums(inode, cur_offset,
1543 num_bytes);
1544
1545 extent_clear_unlock_delalloc(inode, cur_offset,
1546 cur_offset + num_bytes - 1, end,
1547 locked_page, EXTENT_LOCKED |
1548 EXTENT_DELALLOC |
1549 EXTENT_CLEAR_DATA_RESV,
1550 PAGE_UNLOCK | PAGE_SET_PRIVATE2);
1551
1552 if (!nolock && nocow)
1553 btrfs_end_write_no_snapshotting(root);
1554 cur_offset = extent_end;
1555
1556 /*
1557 * btrfs_reloc_clone_csums() error, now we're OK to call error
1558 * handler, as metadata for created ordered extent will only
1559 * be freed by btrfs_finish_ordered_io().
1560 */
1561 if (ret)
1562 goto error;
1563 if (cur_offset > end)
1564 break;
1565 }
1566 btrfs_release_path(path);
1567
1568 if (cur_offset <= end && cow_start == (u64)-1)
1569 cow_start = cur_offset;
1570
1571 if (cow_start != (u64)-1) {
1572 cur_offset = end;
1573 ret = cow_file_range(inode, locked_page, cow_start, end, end,
1574 page_started, nr_written, 1, NULL);
1575 if (ret)
1576 goto error;
1577 }
1578
1579 error:
1580 if (ret && cur_offset < end)
1581 extent_clear_unlock_delalloc(inode, cur_offset, end, end,
1582 locked_page, EXTENT_LOCKED |
1583 EXTENT_DELALLOC | EXTENT_DEFRAG |
1584 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1585 PAGE_CLEAR_DIRTY |
1586 PAGE_SET_WRITEBACK |
1587 PAGE_END_WRITEBACK);
1588 btrfs_free_path(path);
1589 return ret;
1590 }
1591
1592 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1593 {
1594
1595 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1596 !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1597 return 0;
1598
1599 /*
1600 * @defrag_bytes is a hint value, no spinlock held here,
1601 * if it is not zero, it means the file is being defragged.
1602 * Force cow if given extent needs to be defragged.
1603 */
1604 if (BTRFS_I(inode)->defrag_bytes &&
1605 test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1606 EXTENT_DEFRAG, 0, NULL))
1607 return 1;
1608
1609 return 0;
1610 }
1611
1612 /*
1613 * extent_io.c callback to do delayed allocation processing
1614 */
1615 static int run_delalloc_range(void *private_data, struct page *locked_page,
1616 u64 start, u64 end, int *page_started,
1617 unsigned long *nr_written)
1618 {
1619 struct inode *inode = private_data;
1620 int ret;
1621 int force_cow = need_force_cow(inode, start, end);
1622
1623 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1624 ret = run_delalloc_nocow(inode, locked_page, start, end,
1625 page_started, 1, nr_written);
1626 } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1627 ret = run_delalloc_nocow(inode, locked_page, start, end,
1628 page_started, 0, nr_written);
1629 } else if (!inode_need_compress(inode, start, end)) {
1630 ret = cow_file_range(inode, locked_page, start, end, end,
1631 page_started, nr_written, 1, NULL);
1632 } else {
1633 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1634 &BTRFS_I(inode)->runtime_flags);
1635 ret = cow_file_range_async(inode, locked_page, start, end,
1636 page_started, nr_written);
1637 }
1638 if (ret)
1639 btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
1640 return ret;
1641 }
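/*
 * Summary of the dispatch above: NODATACOW inodes take the nocow path
 * with force=1, inodes with preallocated extents take it with force=0
 * (so only PREALLOC extents are overwritten in place), ranges that don't
 * want compression go straight through cow_file_range(), and everything
 * else is queued for the async compression machinery.
 */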
1642
1643 static void btrfs_split_extent_hook(void *private_data,
1644 struct extent_state *orig, u64 split)
1645 {
1646 struct inode *inode = private_data;
1647 u64 size;
1648
1649 /* not delalloc, ignore it */
1650 if (!(orig->state & EXTENT_DELALLOC))
1651 return;
1652
1653 size = orig->end - orig->start + 1;
1654 if (size > BTRFS_MAX_EXTENT_SIZE) {
1655 u32 num_extents;
1656 u64 new_size;
1657
1658 /*
1659 * See the explanation in btrfs_merge_extent_hook, the same
1660 * applies here, just in reverse.
1661 */
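/*
 * Worked example, assuming count_max_extents() rounds up to
 * BTRFS_MAX_EXTENT_SIZE units: splitting a (MAX + 8K) range (2 extents)
 * at 4K leaves pieces of 4K and MAX + 4K needing 1 + 2 = 3 extents, so we
 * fall through and bump outstanding_extents; splitting 2 * MAX into
 * MAX + MAX still needs 1 + 1 = 2, so we return without a new extent.
 */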
1662 new_size = orig->end - split + 1;
1663 num_extents = count_max_extents(new_size);
1664 new_size = split - orig->start;
1665 num_extents += count_max_extents(new_size);
1666 if (count_max_extents(size) >= num_extents)
1667 return;
1668 }
1669
1670 spin_lock(&BTRFS_I(inode)->lock);
1671 BTRFS_I(inode)->outstanding_extents++;
1672 spin_unlock(&BTRFS_I(inode)->lock);
1673 }
1674
1675 /*
1676 * extent_io.c merge_extent_hook, used to track merged delayed allocation
1677 * extents so we can keep track of new extents that are just merged onto old
1678 * extents, such as when we are doing sequential writes, so we can properly
1679 * account for the metadata space we'll need.
1680 */
1681 static void btrfs_merge_extent_hook(void *private_data,
1682 struct extent_state *new,
1683 struct extent_state *other)
1684 {
1685 struct inode *inode = private_data;
1686 u64 new_size, old_size;
1687 u32 num_extents;
1688
1689 /* not delalloc, ignore it */
1690 if (!(other->state & EXTENT_DELALLOC))
1691 return;
1692
1693 if (new->start > other->start)
1694 new_size = new->end - other->start + 1;
1695 else
1696 new_size = other->end - new->start + 1;
1697
1698 /* we're not bigger than the max, unreserve the space and go */
1699 if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1700 spin_lock(&BTRFS_I(inode)->lock);
1701 BTRFS_I(inode)->outstanding_extents--;
1702 spin_unlock(&BTRFS_I(inode)->lock);
1703 return;
1704 }
1705
1706 /*
1707 * We have to add up either side to figure out how many extents were
1708 * accounted for before we merged into one big extent. If the number of
1709 * extents we accounted for is <= the amount we need for the new range
1710 * then we can return, otherwise drop. Think of it like this
1711 *
1712 * [ 4k][MAX_SIZE]
1713 *
1714 * So we've grown the extent by a MAX_SIZE extent, this would mean we
1715 * need 2 outstanding extents, on one side we have 1 and the other side
1716 * we have 1 so they are == and we can return. But in this case
1717 *
1718 * [MAX_SIZE+4k][MAX_SIZE+4k]
1719 *
1720 * Each range on their own accounts for 2 extents, but merged together
1721 * they are only 3 extents worth of accounting, so we need to drop in
1722 * this case.
1723 */
1724 old_size = other->end - other->start + 1;
1725 num_extents = count_max_extents(old_size);
1726 old_size = new->end - new->start + 1;
1727 num_extents += count_max_extents(old_size);
1728 if (count_max_extents(new_size) >= num_extents)
1729 return;
1730
1731 spin_lock(&BTRFS_I(inode)->lock);
1732 BTRFS_I(inode)->outstanding_extents--;
1733 spin_unlock(&BTRFS_I(inode)->lock);
1734 }
1735
1736 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1737 struct inode *inode)
1738 {
1739 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1740
1741 spin_lock(&root->delalloc_lock);
1742 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1743 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1744 &root->delalloc_inodes);
1745 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1746 &BTRFS_I(inode)->runtime_flags);
1747 root->nr_delalloc_inodes++;
1748 if (root->nr_delalloc_inodes == 1) {
1749 spin_lock(&fs_info->delalloc_root_lock);
1750 BUG_ON(!list_empty(&root->delalloc_root));
1751 list_add_tail(&root->delalloc_root,
1752 &fs_info->delalloc_roots);
1753 spin_unlock(&fs_info->delalloc_root_lock);
1754 }
1755 }
1756 spin_unlock(&root->delalloc_lock);
1757 }
1758
1759
1760 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
1761 struct btrfs_inode *inode)
1762 {
1763 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1764
1765 if (!list_empty(&inode->delalloc_inodes)) {
1766 list_del_init(&inode->delalloc_inodes);
1767 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1768 &inode->runtime_flags);
1769 root->nr_delalloc_inodes--;
1770 if (!root->nr_delalloc_inodes) {
1771 spin_lock(&fs_info->delalloc_root_lock);
1772 BUG_ON(list_empty(&root->delalloc_root));
1773 list_del_init(&root->delalloc_root);
1774 spin_unlock(&fs_info->delalloc_root_lock);
1775 }
1776 }
1777 }
1778
1779 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1780 struct btrfs_inode *inode)
1781 {
1782 spin_lock(&root->delalloc_lock);
1783 __btrfs_del_delalloc_inode(root, inode);
1784 spin_unlock(&root->delalloc_lock);
1785 }
1786
1787 /*
1788 * extent_io.c set_bit_hook, used to track delayed allocation
1789 * bytes in this file, and to maintain the list of inodes that
1790 * have pending delalloc work to be done.
1791 */
1792 static void btrfs_set_bit_hook(void *private_data,
1793 struct extent_state *state, unsigned *bits)
1794 {
1795 struct inode *inode = private_data;
1796
1797 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1798
1799 if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1800 WARN_ON(1);
1801 /*
1802 * set_bit and clear_bit hooks normally require _irqsave/restore
1803 * but in this case, we are only testing for the DELALLOC
1804 * bit, which is only set or cleared with irqs on
1805 */
1806 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1807 struct btrfs_root *root = BTRFS_I(inode)->root;
1808 u64 len = state->end + 1 - state->start;
1809 bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
1810
1811 if (*bits & EXTENT_FIRST_DELALLOC) {
1812 *bits &= ~EXTENT_FIRST_DELALLOC;
1813 } else {
1814 spin_lock(&BTRFS_I(inode)->lock);
1815 BTRFS_I(inode)->outstanding_extents++;
1816 spin_unlock(&BTRFS_I(inode)->lock);
1817 }
1818
1819 /* For sanity tests */
1820 if (btrfs_is_testing(fs_info))
1821 return;
1822
1823 percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
1824 fs_info->delalloc_batch);
1825 spin_lock(&BTRFS_I(inode)->lock);
1826 BTRFS_I(inode)->delalloc_bytes += len;
1827 if (*bits & EXTENT_DEFRAG)
1828 BTRFS_I(inode)->defrag_bytes += len;
1829 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1830 &BTRFS_I(inode)->runtime_flags))
1831 btrfs_add_delalloc_inodes(root, inode);
1832 spin_unlock(&BTRFS_I(inode)->lock);
1833 }
1834
1835 if (!(state->state & EXTENT_DELALLOC_NEW) &&
1836 (*bits & EXTENT_DELALLOC_NEW)) {
1837 spin_lock(&BTRFS_I(inode)->lock);
1838 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
1839 state->start;
1840 spin_unlock(&BTRFS_I(inode)->lock);
1841 }
1842 }
1843
1844 /*
1845 * extent_io.c clear_bit_hook, see set_bit_hook for why
1846 */
1847 static void btrfs_clear_bit_hook(void *private_data,
1848 struct extent_state *state,
1849 unsigned *bits)
1850 {
1851 struct btrfs_inode *inode = BTRFS_I((struct inode *)private_data);
1852 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1853 u64 len = state->end + 1 - state->start;
1854 u32 num_extents = count_max_extents(len);
1855
1856 if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
1857 spin_lock(&inode->lock);
1858 inode->defrag_bytes -= len;
1859 spin_unlock(&inode->lock);
1860 }
1861
1862 /*
1863 * set_bit and clear_bit hooks normally require _irqsave/restore
1864 * but in this case, we are only testing for the DELALLOC
1865 * bit, which is only set or cleared with irqs on
1866 */
1867 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1868 struct btrfs_root *root = inode->root;
1869 bool do_list = !btrfs_is_free_space_inode(inode);
1870
1871 if (*bits & EXTENT_FIRST_DELALLOC) {
1872 *bits &= ~EXTENT_FIRST_DELALLOC;
1873 } else if (!(*bits & EXTENT_CLEAR_META_RESV)) {
1874 spin_lock(&inode->lock);
1875 inode->outstanding_extents -= num_extents;
1876 spin_unlock(&inode->lock);
1877 }
1878
1879 /*
1880 * We don't reserve metadata space for space cache inodes so we
1881 * don't need to call delalloc_release_metadata if there is an
1882 * error.
1883 */
1884 if (*bits & EXTENT_CLEAR_META_RESV &&
1885 root != fs_info->tree_root)
1886 btrfs_delalloc_release_metadata(inode, len);
1887
1888 /* For sanity tests. */
1889 if (btrfs_is_testing(fs_info))
1890 return;
1891
1892 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
1893 do_list && !(state->state & EXTENT_NORESERVE) &&
1894 (*bits & EXTENT_CLEAR_DATA_RESV))
1895 btrfs_free_reserved_data_space_noquota(
1896 &inode->vfs_inode,
1897 state->start, len);
1898
1899 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
1900 fs_info->delalloc_batch);
1901 spin_lock(&inode->lock);
1902 inode->delalloc_bytes -= len;
1903 if (do_list && inode->delalloc_bytes == 0 &&
1904 test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1905 &inode->runtime_flags))
1906 btrfs_del_delalloc_inode(root, inode);
1907 spin_unlock(&inode->lock);
1908 }
1909
1910 if ((state->state & EXTENT_DELALLOC_NEW) &&
1911 (*bits & EXTENT_DELALLOC_NEW)) {
1912 spin_lock(&inode->lock);
1913 ASSERT(inode->new_delalloc_bytes >= len);
1914 inode->new_delalloc_bytes -= len;
1915 spin_unlock(&inode->lock);
1916 }
1917 }
1918
1919 /*
1920 * extent_io.c merge_bio_hook; this must check the chunk tree to make sure
1921 * we don't create bios that span stripes or chunks
1922 *
1923 * return 1 if page cannot be merged to bio
1924 * return 0 if page can be merged to bio
1925 * return error otherwise
1926 */
1927 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1928 size_t size, struct bio *bio,
1929 unsigned long bio_flags)
1930 {
1931 struct inode *inode = page->mapping->host;
1932 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1933 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1934 u64 length = 0;
1935 u64 map_length;
1936 int ret;
1937
1938 if (bio_flags & EXTENT_BIO_COMPRESSED)
1939 return 0;
1940
1941 length = bio->bi_iter.bi_size;
1942 map_length = length;
1943 ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
1944 NULL, 0);
1945 if (ret < 0)
1946 return ret;
1947 if (map_length < length + size)
1948 return 1;
1949 return 0;
1950 }
1951
1952 /*
1953 * in order to insert checksums into the metadata in large chunks,
1954 * we wait until bio submission time. All the pages in the bio are
1955 * checksummed and sums are attached onto the ordered extent record.
1956 *
1957 * At IO completion time the csums attached to the ordered extent record
1958 * are inserted into the btree
1959 */
1960 static blk_status_t __btrfs_submit_bio_start(void *private_data, struct bio *bio,
1961 int mirror_num, unsigned long bio_flags,
1962 u64 bio_offset)
1963 {
1964 struct inode *inode = private_data;
1965 blk_status_t ret = 0;
1966
1967 ret = btrfs_csum_one_bio(inode, bio, 0, 0);
1968 BUG_ON(ret); /* -ENOMEM */
1969 return 0;
1970 }
1971
1972 /*
1973 * in order to insert checksums into the metadata in large chunks,
1974 * we wait until bio submission time. All the pages in the bio are
1975 * checksummed and sums are attached onto the ordered extent record.
1976 *
1977 * At IO completion time the csums attached to the ordered extent record
1978 * are inserted into the btree
1979 */
1980 static blk_status_t __btrfs_submit_bio_done(void *private_data, struct bio *bio,
1981 int mirror_num, unsigned long bio_flags,
1982 u64 bio_offset)
1983 {
1984 struct inode *inode = private_data;
1985 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1986 blk_status_t ret;
1987
1988 ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
1989 if (ret) {
1990 bio->bi_status = ret;
1991 bio_endio(bio);
1992 }
1993 return ret;
1994 }
1995
1996 /*
1997 * extent_io.c submission hook. This does the right thing for csum calculation
1998 * on write, or reading the csums from the tree before a read
1999 */
2000 static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
2001 int mirror_num, unsigned long bio_flags,
2002 u64 bio_offset)
2003 {
2004 struct inode *inode = private_data;
2005 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2006 struct btrfs_root *root = BTRFS_I(inode)->root;
2007 enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
2008 blk_status_t ret = 0;
2009 int skip_sum;
2010 int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
2011
2012 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
2013
2014 if (btrfs_is_free_space_inode(BTRFS_I(inode)))
2015 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
2016
2017 if (bio_op(bio) != REQ_OP_WRITE) {
2018 ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
2019 if (ret)
2020 goto out;
2021
2022 if (bio_flags & EXTENT_BIO_COMPRESSED) {
2023 ret = btrfs_submit_compressed_read(inode, bio,
2024 mirror_num,
2025 bio_flags);
2026 goto out;
2027 } else if (!skip_sum) {
2028 ret = btrfs_lookup_bio_sums(inode, bio, NULL);
2029 if (ret)
2030 goto out;
2031 }
2032 goto mapit;
2033 } else if (async && !skip_sum) {
2034 /* csum items have already been cloned */
2035 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2036 goto mapit;
2037 /* we're doing a write, do the async checksumming */
2038 ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
2039 bio_offset, inode,
2040 __btrfs_submit_bio_start,
2041 __btrfs_submit_bio_done);
2042 goto out;
2043 } else if (!skip_sum) {
2044 ret = btrfs_csum_one_bio(inode, bio, 0, 0);
2045 if (ret)
2046 goto out;
2047 }
2048
2049 mapit:
2050 ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
2051
2052 out:
2053 if (ret) {
2054 bio->bi_status = ret;
2055 bio_endio(bio);
2056 }
2057 return ret;
2058 }
2059
2060 /*
2061 * given a list of ordered sums, record them in the inode. This happens
2062 * at IO completion time based on sums calculated at bio submission time.
2063 */
2064 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
2065 struct inode *inode, struct list_head *list)
2066 {
2067 struct btrfs_ordered_sum *sum;
2068
2069 list_for_each_entry(sum, list, list) {
2070 trans->adding_csums = 1;
2071 btrfs_csum_file_blocks(trans,
2072 BTRFS_I(inode)->root->fs_info->csum_root, sum);
2073 trans->adding_csums = 0;
2074 }
2075 return 0;
2076 }
2077
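/*
 * mark the range [start, end] as delalloc in the inode's io_tree; the
 * set_bit hook above takes care of the accounting and list handling.
 */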
2078 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2079 struct extent_state **cached_state, int dedupe)
2080 {
2081 WARN_ON((end & (PAGE_SIZE - 1)) == 0);
2082 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
2083 cached_state);
2084 }
2085
2086 /* see btrfs_writepage_start_hook for details on why this is required */
2087 struct btrfs_writepage_fixup {
2088 struct page *page;
2089 struct btrfs_work work;
2090 };
2091
2092 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2093 {
2094 struct btrfs_writepage_fixup *fixup;
2095 struct btrfs_ordered_extent *ordered;
2096 struct extent_state *cached_state = NULL;
2097 struct extent_changeset *data_reserved = NULL;
2098 struct page *page;
2099 struct inode *inode;
2100 u64 page_start;
2101 u64 page_end;
2102 int ret;
2103
2104 fixup = container_of(work, struct btrfs_writepage_fixup, work);
2105 page = fixup->page;
2106 again:
2107 lock_page(page);
2108 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2109 ClearPageChecked(page);
2110 goto out_page;
2111 }
2112
2113 inode = page->mapping->host;
2114 page_start = page_offset(page);
2115 page_end = page_offset(page) + PAGE_SIZE - 1;
2116
2117 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
2118 &cached_state);
2119
2120 /* already ordered? We're done */
2121 if (PagePrivate2(page))
2122 goto out;
2123
2124 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
2125 PAGE_SIZE);
2126 if (ordered) {
2127 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2128 page_end, &cached_state, GFP_NOFS);
2129 unlock_page(page);
2130 btrfs_start_ordered_extent(inode, ordered, 1);
2131 btrfs_put_ordered_extent(ordered);
2132 goto again;
2133 }
2134
2135 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2136 PAGE_SIZE);
2137 if (ret) {
2138 mapping_set_error(page->mapping, ret);
2139 end_extent_writepage(page, ret, page_start, page_end);
2140 ClearPageChecked(page);
2141 goto out;
2142 }
2143
2144 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
2145 &cached_state, 0);
2146 if (ret) {
2147 mapping_set_error(page->mapping, ret);
2148 end_extent_writepage(page, ret, page_start, page_end);
2149 ClearPageChecked(page);
2150 goto out;
2151 }
2152
2153 ClearPageChecked(page);
2154 set_page_dirty(page);
2155 out:
2156 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2157 &cached_state, GFP_NOFS);
2158 out_page:
2159 unlock_page(page);
2160 put_page(page);
2161 kfree(fixup);
2162 extent_changeset_free(data_reserved);
2163 }
2164
2165 /*
2166 * There are a few paths in the higher layers of the kernel that directly
2167 * set the page dirty bit without asking the filesystem if it is a
2168 * good idea. This causes problems because we want to make sure COW
2169 * properly happens and the data=ordered rules are followed.
2170 *
2171 * In our case any range that doesn't have the ORDERED bit set
2172 * hasn't been properly set up for IO. We kick off an async process
2173 * to fix it up. The async helper will wait for ordered extents, set
2174 * the delalloc bit and make it safe to write the page.
2175 */
2176 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2177 {
2178 struct inode *inode = page->mapping->host;
2179 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2180 struct btrfs_writepage_fixup *fixup;
2181
2182 /* this page is properly in the ordered list */
2183 if (TestClearPagePrivate2(page))
2184 return 0;
2185
2186 if (PageChecked(page))
2187 return -EAGAIN;
2188
2189 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2190 if (!fixup)
2191 return -EAGAIN;
2192
2193 SetPageChecked(page);
2194 get_page(page);
2195 btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2196 btrfs_writepage_fixup_worker, NULL, NULL);
2197 fixup->page = page;
2198 btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2199 return -EBUSY;
2200 }
2201
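/*
 * insert a file extent item for an extent that has already been reserved
 * and written: drop any overlapping extents, fill in the new item and
 * hand the reserved space over to a delayed ref.
 */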
2202 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2203 struct inode *inode, u64 file_pos,
2204 u64 disk_bytenr, u64 disk_num_bytes,
2205 u64 num_bytes, u64 ram_bytes,
2206 u8 compression, u8 encryption,
2207 u16 other_encoding, int extent_type)
2208 {
2209 struct btrfs_root *root = BTRFS_I(inode)->root;
2210 struct btrfs_file_extent_item *fi;
2211 struct btrfs_path *path;
2212 struct extent_buffer *leaf;
2213 struct btrfs_key ins;
2214 u64 qg_released;
2215 int extent_inserted = 0;
2216 int ret;
2217
2218 path = btrfs_alloc_path();
2219 if (!path)
2220 return -ENOMEM;
2221
2222 /*
2223 * we may be replacing one extent in the tree with another.
2224 * The new extent is pinned in the extent map, and we don't want
2225 * to drop it from the cache until it is completely in the btree.
2226 *
2227 * So, tell btrfs_drop_extents to leave this extent in the cache.
2228 * the caller is expected to unpin it and allow it to be merged
2229 * with the others.
2230 */
2231 ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2232 file_pos + num_bytes, NULL, 0,
2233 1, sizeof(*fi), &extent_inserted);
2234 if (ret)
2235 goto out;
2236
2237 if (!extent_inserted) {
2238 ins.objectid = btrfs_ino(BTRFS_I(inode));
2239 ins.offset = file_pos;
2240 ins.type = BTRFS_EXTENT_DATA_KEY;
2241
2242 path->leave_spinning = 1;
2243 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2244 sizeof(*fi));
2245 if (ret)
2246 goto out;
2247 }
2248 leaf = path->nodes[0];
2249 fi = btrfs_item_ptr(leaf, path->slots[0],
2250 struct btrfs_file_extent_item);
2251 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2252 btrfs_set_file_extent_type(leaf, fi, extent_type);
2253 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2254 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2255 btrfs_set_file_extent_offset(leaf, fi, 0);
2256 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2257 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2258 btrfs_set_file_extent_compression(leaf, fi, compression);
2259 btrfs_set_file_extent_encryption(leaf, fi, encryption);
2260 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2261
2262 btrfs_mark_buffer_dirty(leaf);
2263 btrfs_release_path(path);
2264
2265 inode_add_bytes(inode, num_bytes);
2266
2267 ins.objectid = disk_bytenr;
2268 ins.offset = disk_num_bytes;
2269 ins.type = BTRFS_EXTENT_ITEM_KEY;
2270
2271 /*
2272 * Release the reserved range from inode dirty range map, as it is
2273 * already moved into delayed_ref_head
2274 */
2275 ret = btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2276 if (ret < 0)
2277 goto out;
2278 qg_released = ret;
2279 ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid,
2280 btrfs_ino(BTRFS_I(inode)), file_pos, qg_released, &ins);
2281 out:
2282 btrfs_free_path(path);
2283
2284 return ret;
2285 }
2286
2287 /* snapshot-aware defrag */
2288 struct sa_defrag_extent_backref {
2289 struct rb_node node;
2290 struct old_sa_defrag_extent *old;
2291 u64 root_id;
2292 u64 inum;
2293 u64 file_pos;
2294 u64 extent_offset;
2295 u64 num_bytes;
2296 u64 generation;
2297 };
2298
2299 struct old_sa_defrag_extent {
2300 struct list_head list;
2301 struct new_sa_defrag_extent *new;
2302
2303 u64 extent_offset;
2304 u64 bytenr;
2305 u64 offset;
2306 u64 len;
2307 int count;
2308 };
2309
2310 struct new_sa_defrag_extent {
2311 struct rb_root root;
2312 struct list_head head;
2313 struct btrfs_path *path;
2314 struct inode *inode;
2315 u64 file_pos;
2316 u64 len;
2317 u64 bytenr;
2318 u64 disk_len;
2319 u8 compress_type;
2320 };
2321
2322 static int backref_comp(struct sa_defrag_extent_backref *b1,
2323 struct sa_defrag_extent_backref *b2)
2324 {
2325 if (b1->root_id < b2->root_id)
2326 return -1;
2327 else if (b1->root_id > b2->root_id)
2328 return 1;
2329
2330 if (b1->inum < b2->inum)
2331 return -1;
2332 else if (b1->inum > b2->inum)
2333 return 1;
2334
2335 if (b1->file_pos < b2->file_pos)
2336 return -1;
2337 else if (b1->file_pos > b2->file_pos)
2338 return 1;
2339
2340 /*
2341 * [------------------------------] ===> (a range of space)
2342 * |<--->| |<---->| =============> (fs/file tree A)
2343 * |<---------------------------->| ===> (fs/file tree B)
2344 *
2345 * A range of space can refer to two file extents in one tree while
2346 * referring to only one file extent in another tree.
2347 *
2348 * So we may process a disk offset more than once (two extents in A),
2349 * land on the same extent (one extent in B), and then insert two identical
2350 * backrefs (both referring to the extent in B).
2351 */
2352 return 0;
2353 }
2354
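/* insert a backref into the rbtree, ordered by backref_comp() */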
2355 static void backref_insert(struct rb_root *root,
2356 struct sa_defrag_extent_backref *backref)
2357 {
2358 struct rb_node **p = &root->rb_node;
2359 struct rb_node *parent = NULL;
2360 struct sa_defrag_extent_backref *entry;
2361 int ret;
2362
2363 while (*p) {
2364 parent = *p;
2365 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2366
2367 ret = backref_comp(backref, entry);
2368 if (ret < 0)
2369 p = &(*p)->rb_left;
2370 else
2371 p = &(*p)->rb_right;
2372 }
2373
2374 rb_link_node(&backref->node, parent, p);
2375 rb_insert_color(&backref->node, root);
2376 }
2377
2378 /*
2379 * Note the backref might have changed, and in this case we just return 0.
2380 */
2381 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2382 void *ctx)
2383 {
2384 struct btrfs_file_extent_item *extent;
2385 struct old_sa_defrag_extent *old = ctx;
2386 struct new_sa_defrag_extent *new = old->new;
2387 struct btrfs_path *path = new->path;
2388 struct btrfs_key key;
2389 struct btrfs_root *root;
2390 struct sa_defrag_extent_backref *backref;
2391 struct extent_buffer *leaf;
2392 struct inode *inode = new->inode;
2393 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2394 int slot;
2395 int ret;
2396 u64 extent_offset;
2397 u64 num_bytes;
2398
2399 if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2400 inum == btrfs_ino(BTRFS_I(inode)))
2401 return 0;
2402
2403 key.objectid = root_id;
2404 key.type = BTRFS_ROOT_ITEM_KEY;
2405 key.offset = (u64)-1;
2406
2407 root = btrfs_read_fs_root_no_name(fs_info, &key);
2408 if (IS_ERR(root)) {
2409 if (PTR_ERR(root) == -ENOENT)
2410 return 0;
2411 WARN_ON(1);
2412 btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
2413 inum, offset, root_id);
2414 return PTR_ERR(root);
2415 }
2416
2417 key.objectid = inum;
2418 key.type = BTRFS_EXTENT_DATA_KEY;
2419 if (offset > (u64)-1 << 32)
2420 key.offset = 0;
2421 else
2422 key.offset = offset;
2423
2424 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2425 if (WARN_ON(ret < 0))
2426 return ret;
2427 ret = 0;
2428
2429 while (1) {
2430 cond_resched();
2431
2432 leaf = path->nodes[0];
2433 slot = path->slots[0];
2434
2435 if (slot >= btrfs_header_nritems(leaf)) {
2436 ret = btrfs_next_leaf(root, path);
2437 if (ret < 0) {
2438 goto out;
2439 } else if (ret > 0) {
2440 ret = 0;
2441 goto out;
2442 }
2443 continue;
2444 }
2445
2446 path->slots[0]++;
2447
2448 btrfs_item_key_to_cpu(leaf, &key, slot);
2449
2450 if (key.objectid > inum)
2451 goto out;
2452
2453 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2454 continue;
2455
2456 extent = btrfs_item_ptr(leaf, slot,
2457 struct btrfs_file_extent_item);
2458
2459 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2460 continue;
2461
2462 /*
2463 * 'offset' refers to the exact key.offset,
2464 * NOT the 'offset' field in btrfs_extent_data_ref, ie.
2465 * (key.offset - extent_offset).
2466 */
2467 if (key.offset != offset)
2468 continue;
2469
2470 extent_offset = btrfs_file_extent_offset(leaf, extent);
2471 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2472
2473 if (extent_offset >= old->extent_offset + old->offset +
2474 old->len || extent_offset + num_bytes <=
2475 old->extent_offset + old->offset)
2476 continue;
2477 break;
2478 }
2479
2480 backref = kmalloc(sizeof(*backref), GFP_NOFS);
2481 if (!backref) {
2482 ret = -ENOENT;
2483 goto out;
2484 }
2485
2486 backref->root_id = root_id;
2487 backref->inum = inum;
2488 backref->file_pos = offset;
2489 backref->num_bytes = num_bytes;
2490 backref->extent_offset = extent_offset;
2491 backref->generation = btrfs_file_extent_generation(leaf, extent);
2492 backref->old = old;
2493 backref_insert(&new->root, backref);
2494 old->count++;
2495 out:
2496 btrfs_release_path(path);
2497 WARN_ON(ret);
2498 return ret;
2499 }
2500
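/*
 * collect the backrefs of every old extent recorded for snapshot-aware
 * defrag; returns false if there is nothing left to relink.
 */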
2501 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2502 struct new_sa_defrag_extent *new)
2503 {
2504 struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2505 struct old_sa_defrag_extent *old, *tmp;
2506 int ret;
2507
2508 new->path = path;
2509
2510 list_for_each_entry_safe(old, tmp, &new->head, list) {
2511 ret = iterate_inodes_from_logical(old->bytenr +
2512 old->extent_offset, fs_info,
2513 path, record_one_backref,
2514 old);
2515 if (ret < 0 && ret != -ENOENT)
2516 return false;
2517
2518 /* no backref to be processed for this extent */
2519 if (!old->count) {
2520 list_del(&old->list);
2521 kfree(old);
2522 }
2523 }
2524
2525 if (list_empty(&new->head))
2526 return false;
2527
2528 return true;
2529 }
2530
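/* return 1 if the existing file extent can be merged with the relinked range */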
2531 static int relink_is_mergable(struct extent_buffer *leaf,
2532 struct btrfs_file_extent_item *fi,
2533 struct new_sa_defrag_extent *new)
2534 {
2535 if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2536 return 0;
2537
2538 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2539 return 0;
2540
2541 if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2542 return 0;
2543
2544 if (btrfs_file_extent_encryption(leaf, fi) ||
2545 btrfs_file_extent_other_encoding(leaf, fi))
2546 return 0;
2547
2548 return 1;
2549 }
2550
2551 /*
2552 * Note the backref might have changed, and in this case we just return 0.
2553 */
2554 static noinline int relink_extent_backref(struct btrfs_path *path,
2555 struct sa_defrag_extent_backref *prev,
2556 struct sa_defrag_extent_backref *backref)
2557 {
2558 struct btrfs_file_extent_item *extent;
2559 struct btrfs_file_extent_item *item;
2560 struct btrfs_ordered_extent *ordered;
2561 struct btrfs_trans_handle *trans;
2562 struct btrfs_root *root;
2563 struct btrfs_key key;
2564 struct extent_buffer *leaf;
2565 struct old_sa_defrag_extent *old = backref->old;
2566 struct new_sa_defrag_extent *new = old->new;
2567 struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2568 struct inode *inode;
2569 struct extent_state *cached = NULL;
2570 int ret = 0;
2571 u64 start;
2572 u64 len;
2573 u64 lock_start;
2574 u64 lock_end;
2575 bool merge = false;
2576 int index;
2577
2578 if (prev && prev->root_id == backref->root_id &&
2579 prev->inum == backref->inum &&
2580 prev->file_pos + prev->num_bytes == backref->file_pos)
2581 merge = true;
2582
2583 /* step 1: get root */
2584 key.objectid = backref->root_id;
2585 key.type = BTRFS_ROOT_ITEM_KEY;
2586 key.offset = (u64)-1;
2587
2588 index = srcu_read_lock(&fs_info->subvol_srcu);
2589
2590 root = btrfs_read_fs_root_no_name(fs_info, &key);
2591 if (IS_ERR(root)) {
2592 srcu_read_unlock(&fs_info->subvol_srcu, index);
2593 if (PTR_ERR(root) == -ENOENT)
2594 return 0;
2595 return PTR_ERR(root);
2596 }
2597
2598 if (btrfs_root_readonly(root)) {
2599 srcu_read_unlock(&fs_info->subvol_srcu, index);
2600 return 0;
2601 }
2602
2603 /* step 2: get inode */
2604 key.objectid = backref->inum;
2605 key.type = BTRFS_INODE_ITEM_KEY;
2606 key.offset = 0;
2607
2608 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2609 if (IS_ERR(inode)) {
2610 srcu_read_unlock(&fs_info->subvol_srcu, index);
2611 return 0;
2612 }
2613
2614 srcu_read_unlock(&fs_info->subvol_srcu, index);
2615
2616 /* step 3: relink backref */
2617 lock_start = backref->file_pos;
2618 lock_end = backref->file_pos + backref->num_bytes - 1;
2619 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2620 &cached);
2621
2622 ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2623 if (ordered) {
2624 btrfs_put_ordered_extent(ordered);
2625 goto out_unlock;
2626 }
2627
2628 trans = btrfs_join_transaction(root);
2629 if (IS_ERR(trans)) {
2630 ret = PTR_ERR(trans);
2631 goto out_unlock;
2632 }
2633
2634 key.objectid = backref->inum;
2635 key.type = BTRFS_EXTENT_DATA_KEY;
2636 key.offset = backref->file_pos;
2637
2638 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2639 if (ret < 0) {
2640 goto out_free_path;
2641 } else if (ret > 0) {
2642 ret = 0;
2643 goto out_free_path;
2644 }
2645
2646 extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2647 struct btrfs_file_extent_item);
2648
2649 if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2650 backref->generation)
2651 goto out_free_path;
2652
2653 btrfs_release_path(path);
2654
2655 start = backref->file_pos;
2656 if (backref->extent_offset < old->extent_offset + old->offset)
2657 start += old->extent_offset + old->offset -
2658 backref->extent_offset;
2659
2660 len = min(backref->extent_offset + backref->num_bytes,
2661 old->extent_offset + old->offset + old->len);
2662 len -= max(backref->extent_offset, old->extent_offset + old->offset);
2663
2664 ret = btrfs_drop_extents(trans, root, inode, start,
2665 start + len, 1);
2666 if (ret)
2667 goto out_free_path;
2668 again:
2669 key.objectid = btrfs_ino(BTRFS_I(inode));
2670 key.type = BTRFS_EXTENT_DATA_KEY;
2671 key.offset = start;
2672
2673 path->leave_spinning = 1;
2674 if (merge) {
2675 struct btrfs_file_extent_item *fi;
2676 u64 extent_len;
2677 struct btrfs_key found_key;
2678
2679 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2680 if (ret < 0)
2681 goto out_free_path;
2682
2683 path->slots[0]--;
2684 leaf = path->nodes[0];
2685 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2686
2687 fi = btrfs_item_ptr(leaf, path->slots[0],
2688 struct btrfs_file_extent_item);
2689 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2690
2691 if (extent_len + found_key.offset == start &&
2692 relink_is_mergable(leaf, fi, new)) {
2693 btrfs_set_file_extent_num_bytes(leaf, fi,
2694 extent_len + len);
2695 btrfs_mark_buffer_dirty(leaf);
2696 inode_add_bytes(inode, len);
2697
2698 ret = 1;
2699 goto out_free_path;
2700 } else {
2701 merge = false;
2702 btrfs_release_path(path);
2703 goto again;
2704 }
2705 }
2706
2707 ret = btrfs_insert_empty_item(trans, root, path, &key,
2708 sizeof(*extent));
2709 if (ret) {
2710 btrfs_abort_transaction(trans, ret);
2711 goto out_free_path;
2712 }
2713
2714 leaf = path->nodes[0];
2715 item = btrfs_item_ptr(leaf, path->slots[0],
2716 struct btrfs_file_extent_item);
2717 btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2718 btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2719 btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2720 btrfs_set_file_extent_num_bytes(leaf, item, len);
2721 btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2722 btrfs_set_file_extent_generation(leaf, item, trans->transid);
2723 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2724 btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2725 btrfs_set_file_extent_encryption(leaf, item, 0);
2726 btrfs_set_file_extent_other_encoding(leaf, item, 0);
2727
2728 btrfs_mark_buffer_dirty(leaf);
2729 inode_add_bytes(inode, len);
2730 btrfs_release_path(path);
2731
2732 ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr,
2733 new->disk_len, 0,
2734 backref->root_id, backref->inum,
2735 new->file_pos); /* start - extent_offset */
2736 if (ret) {
2737 btrfs_abort_transaction(trans, ret);
2738 goto out_free_path;
2739 }
2740
2741 ret = 1;
2742 out_free_path:
2743 btrfs_release_path(path);
2744 path->leave_spinning = 0;
2745 btrfs_end_transaction(trans);
2746 out_unlock:
2747 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2748 &cached, GFP_NOFS);
2749 iput(inode);
2750 return ret;
2751 }
2752
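/* free a snapshot-aware defrag context and all of its recorded old extents */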
2753 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2754 {
2755 struct old_sa_defrag_extent *old, *tmp;
2756
2757 if (!new)
2758 return;
2759
2760 list_for_each_entry_safe(old, tmp, &new->head, list) {
2761 kfree(old);
2762 }
2763 kfree(new);
2764 }
2765
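/*
 * record the backrefs of the defragged extent and relink each of them to
 * the newly written copy, then drop our defrag_running reference.
 */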
2766 static void relink_file_extents(struct new_sa_defrag_extent *new)
2767 {
2768 struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2769 struct btrfs_path *path;
2770 struct sa_defrag_extent_backref *backref;
2771 struct sa_defrag_extent_backref *prev = NULL;
2772 struct inode *inode;
2773 struct btrfs_root *root;
2774 struct rb_node *node;
2775 int ret;
2776
2777 inode = new->inode;
2778 root = BTRFS_I(inode)->root;
2779
2780 path = btrfs_alloc_path();
2781 if (!path)
2782 return;
2783
2784 if (!record_extent_backrefs(path, new)) {
2785 btrfs_free_path(path);
2786 goto out;
2787 }
2788 btrfs_release_path(path);
2789
2790 while (1) {
2791 node = rb_first(&new->root);
2792 if (!node)
2793 break;
2794 rb_erase(node, &new->root);
2795
2796 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2797
2798 ret = relink_extent_backref(path, prev, backref);
2799 WARN_ON(ret < 0);
2800
2801 kfree(prev);
2802
2803 if (ret == 1)
2804 prev = backref;
2805 else
2806 prev = NULL;
2807 cond_resched();
2808 }
2809 kfree(prev);
2810
2811 btrfs_free_path(path);
2812 out:
2813 free_sa_defrag_extent(new);
2814
2815 atomic_dec(&fs_info->defrag_running);
2816 wake_up(&fs_info->transaction_wait);
2817 }
2818
2819 static struct new_sa_defrag_extent *
2820 record_old_file_extents(struct inode *inode,
2821 struct btrfs_ordered_extent *ordered)
2822 {
2823 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2824 struct btrfs_root *root = BTRFS_I(inode)->root;
2825 struct btrfs_path *path;
2826 struct btrfs_key key;
2827 struct old_sa_defrag_extent *old;
2828 struct new_sa_defrag_extent *new;
2829 int ret;
2830
2831 new = kmalloc(sizeof(*new), GFP_NOFS);
2832 if (!new)
2833 return NULL;
2834
2835 new->inode = inode;
2836 new->file_pos = ordered->file_offset;
2837 new->len = ordered->len;
2838 new->bytenr = ordered->start;
2839 new->disk_len = ordered->disk_len;
2840 new->compress_type = ordered->compress_type;
2841 new->root = RB_ROOT;
2842 INIT_LIST_HEAD(&new->head);
2843
2844 path = btrfs_alloc_path();
2845 if (!path)
2846 goto out_kfree;
2847
2848 key.objectid = btrfs_ino(BTRFS_I(inode));
2849 key.type = BTRFS_EXTENT_DATA_KEY;
2850 key.offset = new->file_pos;
2851
2852 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2853 if (ret < 0)
2854 goto out_free_path;
2855 if (ret > 0 && path->slots[0] > 0)
2856 path->slots[0]--;
2857
2858 /* find out all the old extents for the file range */
2859 while (1) {
2860 struct btrfs_file_extent_item *extent;
2861 struct extent_buffer *l;
2862 int slot;
2863 u64 num_bytes;
2864 u64 offset;
2865 u64 end;
2866 u64 disk_bytenr;
2867 u64 extent_offset;
2868
2869 l = path->nodes[0];
2870 slot = path->slots[0];
2871
2872 if (slot >= btrfs_header_nritems(l)) {
2873 ret = btrfs_next_leaf(root, path);
2874 if (ret < 0)
2875 goto out_free_path;
2876 else if (ret > 0)
2877 break;
2878 continue;
2879 }
2880
2881 btrfs_item_key_to_cpu(l, &key, slot);
2882
2883 if (key.objectid != btrfs_ino(BTRFS_I(inode)))
2884 break;
2885 if (key.type != BTRFS_EXTENT_DATA_KEY)
2886 break;
2887 if (key.offset >= new->file_pos + new->len)
2888 break;
2889
2890 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2891
2892 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2893 if (key.offset + num_bytes < new->file_pos)
2894 goto next;
2895
2896 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2897 if (!disk_bytenr)
2898 goto next;
2899
2900 extent_offset = btrfs_file_extent_offset(l, extent);
2901
2902 old = kmalloc(sizeof(*old), GFP_NOFS);
2903 if (!old)
2904 goto out_free_path;
2905
2906 offset = max(new->file_pos, key.offset);
2907 end = min(new->file_pos + new->len, key.offset + num_bytes);
2908
2909 old->bytenr = disk_bytenr;
2910 old->extent_offset = extent_offset;
2911 old->offset = offset - key.offset;
2912 old->len = end - offset;
2913 old->new = new;
2914 old->count = 0;
2915 list_add_tail(&old->list, &new->head);
2916 next:
2917 path->slots[0]++;
2918 cond_resched();
2919 }
2920
2921 btrfs_free_path(path);
2922 atomic_inc(&fs_info->defrag_running);
2923
2924 return new;
2925
2926 out_free_path:
2927 btrfs_free_path(path);
2928 out_kfree:
2929 free_sa_defrag_extent(new);
2930 return NULL;
2931 }
2932
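/* subtract len from the delalloc byte counter of the block group containing start */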
2933 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2934 u64 start, u64 len)
2935 {
2936 struct btrfs_block_group_cache *cache;
2937
2938 cache = btrfs_lookup_block_group(fs_info, start);
2939 ASSERT(cache);
2940
2941 spin_lock(&cache->lock);
2942 cache->delalloc_bytes -= len;
2943 spin_unlock(&cache->lock);
2944
2945 btrfs_put_block_group(cache);
2946 }
2947
2948 /* as ordered data IO finishes, this gets called so we can finish
2949 * an ordered extent if the range of bytes in the file it covers is
2950 * fully written.
2951 */
2952 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2953 {
2954 struct inode *inode = ordered_extent->inode;
2955 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2956 struct btrfs_root *root = BTRFS_I(inode)->root;
2957 struct btrfs_trans_handle *trans = NULL;
2958 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2959 struct extent_state *cached_state = NULL;
2960 struct new_sa_defrag_extent *new = NULL;
2961 int compress_type = 0;
2962 int ret = 0;
2963 u64 logical_len = ordered_extent->len;
2964 bool nolock;
2965 bool truncated = false;
2966 bool range_locked = false;
2967 bool clear_new_delalloc_bytes = false;
2968 bool clear_reserved_extent = true;
2969
2970 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2971 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
2972 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
2973 clear_new_delalloc_bytes = true;
2974
2975 nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
2976
2977 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2978 ret = -EIO;
2979 goto out;
2980 }
2981
2982 btrfs_free_io_failure_record(BTRFS_I(inode),
2983 ordered_extent->file_offset,
2984 ordered_extent->file_offset +
2985 ordered_extent->len - 1);
2986
2987 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2988 truncated = true;
2989 logical_len = ordered_extent->truncated_len;
2990 /* Truncated the entire extent, don't bother adding */
2991 if (!logical_len)
2992 goto out;
2993 }
2994
2995 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2996 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2997
2998 /*
2999 * For the mwrite (mmap + memset to write) case, we still reserve
3000 * space for the NOCOW range.
3001 * As NOCOW won't cause a new delayed ref, just free the space
3002 */
3003 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
3004 ordered_extent->len);
3005 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
3006 if (nolock)
3007 trans = btrfs_join_transaction_nolock(root);
3008 else
3009 trans = btrfs_join_transaction(root);
3010 if (IS_ERR(trans)) {
3011 ret = PTR_ERR(trans);
3012 trans = NULL;
3013 goto out;
3014 }
3015 trans->block_rsv = &fs_info->delalloc_block_rsv;
3016 ret = btrfs_update_inode_fallback(trans, root, inode);
3017 if (ret) /* -ENOMEM or corruption */
3018 btrfs_abort_transaction(trans, ret);
3019 goto out;
3020 }
3021
3022 range_locked = true;
3023 lock_extent_bits(io_tree, ordered_extent->file_offset,
3024 ordered_extent->file_offset + ordered_extent->len - 1,
3025 &cached_state);
3026
3027 ret = test_range_bit(io_tree, ordered_extent->file_offset,
3028 ordered_extent->file_offset + ordered_extent->len - 1,
3029 EXTENT_DEFRAG, 0, cached_state);
3030 if (ret) {
3031 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
3032 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
3033 /* the inode is shared */
3034 new = record_old_file_extents(inode, ordered_extent);
3035
3036 clear_extent_bit(io_tree, ordered_extent->file_offset,
3037 ordered_extent->file_offset + ordered_extent->len - 1,
3038 EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
3039 }
3040
3041 if (nolock)
3042 trans = btrfs_join_transaction_nolock(root);
3043 else
3044 trans = btrfs_join_transaction(root);
3045 if (IS_ERR(trans)) {
3046 ret = PTR_ERR(trans);
3047 trans = NULL;
3048 goto out;
3049 }
3050
3051 trans->block_rsv = &fs_info->delalloc_block_rsv;
3052
3053 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3054 compress_type = ordered_extent->compress_type;
3055 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3056 BUG_ON(compress_type);
3057 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
3058 ordered_extent->len);
3059 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
3060 ordered_extent->file_offset,
3061 ordered_extent->file_offset +
3062 logical_len);
3063 } else {
3064 BUG_ON(root == fs_info->tree_root);
3065 ret = insert_reserved_file_extent(trans, inode,
3066 ordered_extent->file_offset,
3067 ordered_extent->start,
3068 ordered_extent->disk_len,
3069 logical_len, logical_len,
3070 compress_type, 0, 0,
3071 BTRFS_FILE_EXTENT_REG);
3072 if (!ret) {
3073 clear_reserved_extent = false;
3074 btrfs_release_delalloc_bytes(fs_info,
3075 ordered_extent->start,
3076 ordered_extent->disk_len);
3077 }
3078 }
3079 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
3080 ordered_extent->file_offset, ordered_extent->len,
3081 trans->transid);
3082 if (ret < 0) {
3083 btrfs_abort_transaction(trans, ret);
3084 goto out;
3085 }
3086
3087 add_pending_csums(trans, inode, &ordered_extent->list);
3088
3089 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
3090 ret = btrfs_update_inode_fallback(trans, root, inode);
3091 if (ret) { /* -ENOMEM or corruption */
3092 btrfs_abort_transaction(trans, ret);
3093 goto out;
3094 }
3095 ret = 0;
3096 out:
3097 if (range_locked || clear_new_delalloc_bytes) {
3098 unsigned int clear_bits = 0;
3099
3100 if (range_locked)
3101 clear_bits |= EXTENT_LOCKED;
3102 if (clear_new_delalloc_bytes)
3103 clear_bits |= EXTENT_DELALLOC_NEW;
3104 clear_extent_bit(&BTRFS_I(inode)->io_tree,
3105 ordered_extent->file_offset,
3106 ordered_extent->file_offset +
3107 ordered_extent->len - 1,
3108 clear_bits,
3109 (clear_bits & EXTENT_LOCKED) ? 1 : 0,
3110 0, &cached_state, GFP_NOFS);
3111 }
3112
3113 if (root != fs_info->tree_root)
3114 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3115 ordered_extent->len);
3116 if (trans)
3117 btrfs_end_transaction(trans);
3118
3119 if (ret || truncated) {
3120 u64 start, end;
3121
3122 if (truncated)
3123 start = ordered_extent->file_offset + logical_len;
3124 else
3125 start = ordered_extent->file_offset;
3126 end = ordered_extent->file_offset + ordered_extent->len - 1;
3127 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
3128
3129 /* Drop the cache for the part of the extent we didn't write. */
3130 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
3131
3132 /*
3133 * If the ordered extent had an IOERR or something else went
3134 * wrong we need to return the space for this ordered extent
3135 * back to the allocator. We only free the extent in the
3136 * truncated case if we didn't write out the extent at all.
3137 *
3138 * If we made it past insert_reserved_file_extent before we
3139 * errored out then we don't need to do this as the accounting
3140 * has already been done.
3141 */
3142 if ((ret || !logical_len) &&
3143 clear_reserved_extent &&
3144 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3145 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
3146 btrfs_free_reserved_extent(fs_info,
3147 ordered_extent->start,
3148 ordered_extent->disk_len, 1);
3149 }
3150
3151
3152 /*
3153 * This needs to be done to make sure anybody waiting knows we are done
3154 * updating everything for this ordered extent.
3155 */
3156 btrfs_remove_ordered_extent(inode, ordered_extent);
3157
3158 /* for snapshot-aware defrag */
3159 if (new) {
3160 if (ret) {
3161 free_sa_defrag_extent(new);
3162 atomic_dec(&fs_info->defrag_running);
3163 } else {
3164 relink_file_extents(new);
3165 }
3166 }
3167
3168 /* once for us */
3169 btrfs_put_ordered_extent(ordered_extent);
3170 /* once for the tree */
3171 btrfs_put_ordered_extent(ordered_extent);
3172
3173 return ret;
3174 }
3175
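/* work item callback to finish an ordered extent from a workqueue */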
3176 static void finish_ordered_fn(struct btrfs_work *work)
3177 {
3178 struct btrfs_ordered_extent *ordered_extent;
3179 ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3180 btrfs_finish_ordered_io(ordered_extent);
3181 }
3182
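/*
 * writeback completion hook: account the finished bytes against the
 * ordered extent and, once the whole extent is done, queue
 * finish_ordered_fn on the appropriate workqueue.
 */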
3183 static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
3184 struct extent_state *state, int uptodate)
3185 {
3186 struct inode *inode = page->mapping->host;
3187 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3188 struct btrfs_ordered_extent *ordered_extent = NULL;
3189 struct btrfs_workqueue *wq;
3190 btrfs_work_func_t func;
3191
3192 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3193
3194 ClearPagePrivate2(page);
3195 if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3196 end - start + 1, uptodate))
3197 return;
3198
3199 if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
3200 wq = fs_info->endio_freespace_worker;
3201 func = btrfs_freespace_write_helper;
3202 } else {
3203 wq = fs_info->endio_write_workers;
3204 func = btrfs_endio_write_helper;
3205 }
3206
3207 btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3208 NULL);
3209 btrfs_queue_work(wq, &ordered_extent->work);
3210 }
3211
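/*
 * verify the checksum of one block within a page against the csum stored
 * in the io_bio; on mismatch the block contents are poisoned and -EIO is
 * returned.
 */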
3212 static int __readpage_endio_check(struct inode *inode,
3213 struct btrfs_io_bio *io_bio,
3214 int icsum, struct page *page,
3215 int pgoff, u64 start, size_t len)
3216 {
3217 char *kaddr;
3218 u32 csum_expected;
3219 u32 csum = ~(u32)0;
3220
3221 csum_expected = *(((u32 *)io_bio->csum) + icsum);
3222
3223 kaddr = kmap_atomic(page);
3224 csum = btrfs_csum_data(kaddr + pgoff, csum, len);
3225 btrfs_csum_final(csum, (u8 *)&csum);
3226 if (csum != csum_expected)
3227 goto zeroit;
3228
3229 kunmap_atomic(kaddr);
3230 return 0;
3231 zeroit:
3232 btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
3233 io_bio->mirror_num);
3234 memset(kaddr + pgoff, 1, len);
3235 flush_dcache_page(page);
3236 kunmap_atomic(kaddr);
3237 return -EIO;
3238 }
3239
3240 /*
3241 * when reads are done, we need to check csums to verify the data is correct
3242 * if there's a match, we allow the bio to finish. If not, the code in
3243 * extent_io.c will try to find good copies for us.
3244 */
3245 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3246 u64 phy_offset, struct page *page,
3247 u64 start, u64 end, int mirror)
3248 {
3249 size_t offset = start - page_offset(page);
3250 struct inode *inode = page->mapping->host;
3251 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3252 struct btrfs_root *root = BTRFS_I(inode)->root;
3253
3254 if (PageChecked(page)) {
3255 ClearPageChecked(page);
3256 return 0;
3257 }
3258
3259 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3260 return 0;
3261
3262 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3263 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3264 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
3265 return 0;
3266 }
3267
3268 phy_offset >>= inode->i_sb->s_blocksize_bits;
3269 return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3270 start, (size_t)(end - start + 1));
3271 }
3272
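/*
 * drop a reference on the inode, but if it would be the last one, queue
 * the inode on fs_info->delayed_iputs so the final iput happens later in
 * btrfs_run_delayed_iputs instead of in the current context.
 */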
3273 void btrfs_add_delayed_iput(struct inode *inode)
3274 {
3275 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3276 struct btrfs_inode *binode = BTRFS_I(inode);
3277
3278 if (atomic_add_unless(&inode->i_count, -1, 1))
3279 return;
3280
3281 spin_lock(&fs_info->delayed_iput_lock);
3282 if (binode->delayed_iput_count == 0) {
3283 ASSERT(list_empty(&binode->delayed_iput));
3284 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3285 } else {
3286 binode->delayed_iput_count++;
3287 }
3288 spin_unlock(&fs_info->delayed_iput_lock);
3289 }
3290
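/* run the final iput for every inode queued by btrfs_add_delayed_iput */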
3291 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3292 {
3293
3294 spin_lock(&fs_info->delayed_iput_lock);
3295 while (!list_empty(&fs_info->delayed_iputs)) {
3296 struct btrfs_inode *inode;
3297
3298 inode = list_first_entry(&fs_info->delayed_iputs,
3299 struct btrfs_inode, delayed_iput);
3300 if (inode->delayed_iput_count) {
3301 inode->delayed_iput_count--;
3302 list_move_tail(&inode->delayed_iput,
3303 &fs_info->delayed_iputs);
3304 } else {
3305 list_del_init(&inode->delayed_iput);
3306 }
3307 spin_unlock(&fs_info->delayed_iput_lock);
3308 iput(&inode->vfs_inode);
3309 spin_lock(&fs_info->delayed_iput_lock);
3310 }
3311 spin_unlock(&fs_info->delayed_iput_lock);
3312 }
3313
3314 /*
3315 * This is called at transaction commit time. If there are no orphan
3316 * files in the subvolume, it removes orphan item and frees block_rsv
3317 * structure.
3318 */
3319 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
3320 struct btrfs_root *root)
3321 {
3322 struct btrfs_fs_info *fs_info = root->fs_info;
3323 struct btrfs_block_rsv *block_rsv;
3324 int ret;
3325
3326 if (atomic_read(&root->orphan_inodes) ||
3327 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
3328 return;
3329
3330 spin_lock(&root->orphan_lock);
3331 if (atomic_read(&root->orphan_inodes)) {
3332 spin_unlock(&root->orphan_lock);
3333 return;
3334 }
3335
3336 if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
3337 spin_unlock(&root->orphan_lock);
3338 return;
3339 }
3340
3341 block_rsv = root->orphan_block_rsv;
3342 root->orphan_block_rsv = NULL;
3343 spin_unlock(&root->orphan_lock);
3344
3345 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
3346 btrfs_root_refs(&root->root_item) > 0) {
3347 ret = btrfs_del_orphan_item(trans, fs_info->tree_root,
3348 root->root_key.objectid);
3349 if (ret)
3350 btrfs_abort_transaction(trans, ret);
3351 else
3352 clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
3353 &root->state);
3354 }
3355
3356 if (block_rsv) {
3357 WARN_ON(block_rsv->size > 0);
3358 btrfs_free_block_rsv(fs_info, block_rsv);
3359 }
3360 }
3361
3362 /*
3363 * This creates an orphan entry for the given inode in case something goes
3364 * wrong in the middle of an unlink/truncate.
3365 *
3366 * NOTE: caller of this function should reserve 5 units of metadata for
3367 * this function.
3368 */
3369 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3370 struct btrfs_inode *inode)
3371 {
3372 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
3373 struct btrfs_root *root = inode->root;
3374 struct btrfs_block_rsv *block_rsv = NULL;
3375 int reserve = 0;
3376 int insert = 0;
3377 int ret;
3378
3379 if (!root->orphan_block_rsv) {
3380 block_rsv = btrfs_alloc_block_rsv(fs_info,
3381 BTRFS_BLOCK_RSV_TEMP);
3382 if (!block_rsv)
3383 return -ENOMEM;
3384 }
3385
3386 spin_lock(&root->orphan_lock);
3387 if (!root->orphan_block_rsv) {
3388 root->orphan_block_rsv = block_rsv;
3389 } else if (block_rsv) {
3390 btrfs_free_block_rsv(fs_info, block_rsv);
3391 block_rsv = NULL;
3392 }
3393
3394 if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3395 &inode->runtime_flags)) {
3396 #if 0
3397 /*
3398 * For proper ENOSPC handling, we should do orphan
3399 * cleanup when mounting. But this introduces a backward
3400 * compatibility issue.
3401 */
3402 if (!xchg(&root->orphan_item_inserted, 1))
3403 insert = 2;
3404 else
3405 insert = 1;
3406 #endif
3407 insert = 1;
3408 atomic_inc(&root->orphan_inodes);
3409 }
3410
3411 if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3412 &inode->runtime_flags))
3413 reserve = 1;
3414 spin_unlock(&root->orphan_lock);
3415
3416 /* grab metadata reservation from transaction handle */
3417 if (reserve) {
3418 ret = btrfs_orphan_reserve_metadata(trans, inode);
3419 ASSERT(!ret);
3420 if (ret) {
3421 /*
3422 * dec doesn't need spin_lock as ->orphan_block_rsv
3423 * would be released only if ->orphan_inodes is
3424 * zero.
3425 */
3426 atomic_dec(&root->orphan_inodes);
3427 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3428 &inode->runtime_flags);
3429 if (insert)
3430 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3431 &inode->runtime_flags);
3432 return ret;
3433 }
3434 }
3435
3436 /* insert an orphan item to track this unlinked/truncated file */
3437 if (insert >= 1) {
3438 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3439 if (ret) {
3440 if (reserve) {
3441 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3442 &inode->runtime_flags);
3443 btrfs_orphan_release_metadata(inode);
3444 }
3445 /*
3446 * btrfs_orphan_commit_root may race with us and set
3447 * ->orphan_block_rsv to zero; in order to avoid that,
3448 * decrease ->orphan_inodes after everything is done.
3449 */
3450 atomic_dec(&root->orphan_inodes);
3451 if (ret != -EEXIST) {
3452 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3453 &inode->runtime_flags);
3454 btrfs_abort_transaction(trans, ret);
3455 return ret;
3456 }
3457 }
3458 ret = 0;
3459 }
3460
3461 /* insert an orphan item to track that this subvolume contains orphan files */
3462 if (insert >= 2) {
3463 ret = btrfs_insert_orphan_item(trans, fs_info->tree_root,
3464 root->root_key.objectid);
3465 if (ret && ret != -EEXIST) {
3466 btrfs_abort_transaction(trans, ret);
3467 return ret;
3468 }
3469 }
3470 return 0;
3471 }
3472
3473 /*
3474 * We have done the truncate/delete so we can go ahead and remove the orphan
3475 * item for this particular inode.
3476 */
3477 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3478 struct btrfs_inode *inode)
3479 {
3480 struct btrfs_root *root = inode->root;
3481 int delete_item = 0;
3482 int ret = 0;
3483
3484 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3485 &inode->runtime_flags))
3486 delete_item = 1;
3487
3488 if (delete_item && trans)
3489 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
3490
3491 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3492 &inode->runtime_flags))
3493 btrfs_orphan_release_metadata(inode);
3494
3495 /*
3496 * btrfs_orphan_commit_root may race with us and set ->orphan_block_rsv
3497 * to zero; in order to avoid that, decrease ->orphan_inodes after
3498 * everything is done.
3499 */
3500 if (delete_item)
3501 atomic_dec(&root->orphan_inodes);
3502
3503 return ret;
3504 }
3505
3506 /*
3507 * this cleans up any orphans that may be left on the list from the last use
3508 * of this root.
3509 */
3510 int btrfs_orphan_cleanup(struct btrfs_root *root)
3511 {
3512 struct btrfs_fs_info *fs_info = root->fs_info;
3513 struct btrfs_path *path;
3514 struct extent_buffer *leaf;
3515 struct btrfs_key key, found_key;
3516 struct btrfs_trans_handle *trans;
3517 struct inode *inode;
3518 u64 last_objectid = 0;
3519 int ret = 0, nr_unlink = 0, nr_truncate = 0;
3520
3521 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3522 return 0;
3523
3524 path = btrfs_alloc_path();
3525 if (!path) {
3526 ret = -ENOMEM;
3527 goto out;
3528 }
3529 path->reada = READA_BACK;
3530
3531 key.objectid = BTRFS_ORPHAN_OBJECTID;
3532 key.type = BTRFS_ORPHAN_ITEM_KEY;
3533 key.offset = (u64)-1;
3534
3535 while (1) {
3536 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3537 if (ret < 0)
3538 goto out;
3539
3540 /*
3541 * ret == 0 means we found what we were searching for, which
3542 * is weird, but possible, so only screw with path if we didn't
3543 * find the key and see if we have stuff that matches
3544 */
3545 if (ret > 0) {
3546 ret = 0;
3547 if (path->slots[0] == 0)
3548 break;
3549 path->slots[0]--;
3550 }
3551
3552 /* pull out the item */
3553 leaf = path->nodes[0];
3554 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3555
3556 /* make sure the item matches what we want */
3557 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3558 break;
3559 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3560 break;
3561
3562 /* release the path since we're done with it */
3563 btrfs_release_path(path);
3564
3565 /*
3566 * this is where we basically do a btrfs_lookup, without the
3567 * cross-root part. We store the inode number in the
3568 * offset of the orphan item.
3569 */
3570
3571 if (found_key.offset == last_objectid) {
3572 btrfs_err(fs_info,
3573 "Error removing orphan entry, stopping orphan cleanup");
3574 ret = -EINVAL;
3575 goto out;
3576 }
3577
3578 last_objectid = found_key.offset;
3579
3580 found_key.objectid = found_key.offset;
3581 found_key.type = BTRFS_INODE_ITEM_KEY;
3582 found_key.offset = 0;
3583 inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
3584 ret = PTR_ERR_OR_ZERO(inode);
3585 if (ret && ret != -ENOENT)
3586 goto out;
3587
3588 if (ret == -ENOENT && root == fs_info->tree_root) {
3589 struct btrfs_root *dead_root;
3590 struct btrfs_fs_info *fs_info = root->fs_info;
3591 int is_dead_root = 0;
3592
3593 /*
3594 * this is an orphan in the tree root. Currently these
3595 * could come from 2 sources:
3596 * a) a snapshot deletion in progress
3597 * b) a free space cache inode
3598 * We need to distinguish those two, as the snapshot
3599 * orphan must not get deleted.
3600 * find_dead_roots already ran before us, so if this
3601 * is a snapshot deletion, we should find the root
3602 * in the dead_roots list
3603 */
3604 spin_lock(&fs_info->trans_lock);
3605 list_for_each_entry(dead_root, &fs_info->dead_roots,
3606 root_list) {
3607 if (dead_root->root_key.objectid ==
3608 found_key.objectid) {
3609 is_dead_root = 1;
3610 break;
3611 }
3612 }
3613 spin_unlock(&fs_info->trans_lock);
3614 if (is_dead_root) {
3615 /* prevent this orphan from being found again */
3616 key.offset = found_key.objectid - 1;
3617 continue;
3618 }
3619 }
3620 /*
3621 * Inode is already gone but the orphan item is still there,
3622 * kill the orphan item.
3623 */
3624 if (ret == -ENOENT) {
3625 trans = btrfs_start_transaction(root, 1);
3626 if (IS_ERR(trans)) {
3627 ret = PTR_ERR(trans);
3628 goto out;
3629 }
3630 btrfs_debug(fs_info, "auto deleting %Lu",
3631 found_key.objectid);
3632 ret = btrfs_del_orphan_item(trans, root,
3633 found_key.objectid);
3634 btrfs_end_transaction(trans);
3635 if (ret)
3636 goto out;
3637 continue;
3638 }
3639
3640 /*
3641 * add this inode to the orphan list so btrfs_orphan_del does
3642 * the proper thing when we hit it
3643 */
3644 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3645 &BTRFS_I(inode)->runtime_flags);
3646 atomic_inc(&root->orphan_inodes);
3647
3648 /* if we have links, this was a truncate, let's do that */
3649 if (inode->i_nlink) {
3650 if (WARN_ON(!S_ISREG(inode->i_mode))) {
3651 iput(inode);
3652 continue;
3653 }
3654 nr_truncate++;
3655
3656 /* 1 for the orphan item deletion. */
3657 trans = btrfs_start_transaction(root, 1);
3658 if (IS_ERR(trans)) {
3659 iput(inode);
3660 ret = PTR_ERR(trans);
3661 goto out;
3662 }
3663 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
3664 btrfs_end_transaction(trans);
3665 if (ret) {
3666 iput(inode);
3667 goto out;
3668 }
3669
3670 ret = btrfs_truncate(inode);
3671 if (ret)
3672 btrfs_orphan_del(NULL, BTRFS_I(inode));
3673 } else {
3674 nr_unlink++;
3675 }
3676
3677 /* this will do delete_inode and everything for us */
3678 iput(inode);
3679 if (ret)
3680 goto out;
3681 }
3682 /* release the path since we're done with it */
3683 btrfs_release_path(path);
3684
3685 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3686
3687 if (root->orphan_block_rsv)
3688 btrfs_block_rsv_release(fs_info, root->orphan_block_rsv,
3689 (u64)-1);
3690
3691 if (root->orphan_block_rsv ||
3692 test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3693 trans = btrfs_join_transaction(root);
3694 if (!IS_ERR(trans))
3695 btrfs_end_transaction(trans);
3696 }
3697
3698 if (nr_unlink)
3699 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3700 if (nr_truncate)
3701 btrfs_debug(fs_info, "truncated %d orphans", nr_truncate);
3702
3703 out:
3704 if (ret)
3705 btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3706 btrfs_free_path(path);
3707 return ret;
3708 }
3709
3710 /*
3711 * very simple check to peek ahead in the leaf looking for xattrs. If we
3712 * don't find any xattrs, we know there can't be any acls.
3713 *
3714 * slot is the slot the inode is in, objectid is the objectid of the inode
3715 */
3716 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3717 int slot, u64 objectid,
3718 int *first_xattr_slot)
3719 {
3720 u32 nritems = btrfs_header_nritems(leaf);
3721 struct btrfs_key found_key;
3722 static u64 xattr_access = 0;
3723 static u64 xattr_default = 0;
3724 int scanned = 0;
3725
3726 if (!xattr_access) {
3727 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3728 strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3729 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3730 strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3731 }
3732
3733 slot++;
3734 *first_xattr_slot = -1;
3735 while (slot < nritems) {
3736 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3737
3738 /* we found a different objectid, there must not be acls */
3739 if (found_key.objectid != objectid)
3740 return 0;
3741
3742 /* we found an xattr, assume we've got an acl */
3743 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3744 if (*first_xattr_slot == -1)
3745 *first_xattr_slot = slot;
3746 if (found_key.offset == xattr_access ||
3747 found_key.offset == xattr_default)
3748 return 1;
3749 }
3750
3751 /*
3752 * we found a key greater than an xattr key, there can't
3753 * be any acls later on
3754 */
3755 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3756 return 0;
3757
3758 slot++;
3759 scanned++;
3760
3761 /*
3762 * it goes inode, inode backrefs, xattrs, extents,
3763 * so if there are a ton of hard links to an inode there can
3764 * be a lot of backrefs. Don't waste time searching too hard,
3765 * this is just an optimization
3766 */
3767 if (scanned >= 8)
3768 break;
3769 }
3770 /* we hit the end of the leaf before we found an xattr or
3771 * something larger than an xattr. We have to assume the inode
3772 * has acls
3773 */
3774 if (*first_xattr_slot == -1)
3775 *first_xattr_slot = slot;
3776 return 1;
3777 }
3778
3779 /*
3780 * read an inode from the btree into the in-memory inode
3781 */
3782 static int btrfs_read_locked_inode(struct inode *inode)
3783 {
3784 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3785 struct btrfs_path *path;
3786 struct extent_buffer *leaf;
3787 struct btrfs_inode_item *inode_item;
3788 struct btrfs_root *root = BTRFS_I(inode)->root;
3789 struct btrfs_key location;
3790 unsigned long ptr;
3791 int maybe_acls;
3792 u32 rdev;
3793 int ret;
3794 bool filled = false;
3795 int first_xattr_slot;
3796
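	/*
	 * If there is a delayed inode item in memory, use it to fill the vfs
	 * inode and skip copying the fields from the on-disk item below.
	 */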
3797 ret = btrfs_fill_inode(inode, &rdev);
3798 if (!ret)
3799 filled = true;
3800
3801 path = btrfs_alloc_path();
3802 if (!path) {
3803 ret = -ENOMEM;
3804 goto make_bad;
3805 }
3806
3807 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3808
3809 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3810 if (ret) {
3811 if (ret > 0)
3812 ret = -ENOENT;
3813 goto make_bad;
3814 }
3815
3816 leaf = path->nodes[0];
3817
3818 if (filled)
3819 goto cache_index;
3820
3821 inode_item = btrfs_item_ptr(leaf, path->slots[0],
3822 struct btrfs_inode_item);
3823 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3824 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3825 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3826 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3827 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3828
3829 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3830 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3831
3832 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3833 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3834
3835 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3836 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3837
3838 BTRFS_I(inode)->i_otime.tv_sec =
3839 btrfs_timespec_sec(leaf, &inode_item->otime);
3840 BTRFS_I(inode)->i_otime.tv_nsec =
3841 btrfs_timespec_nsec(leaf, &inode_item->otime);
3842
3843 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3844 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3845 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3846
3847 inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3848 inode->i_generation = BTRFS_I(inode)->generation;
3849 inode->i_rdev = 0;
3850 rdev = btrfs_inode_rdev(leaf, inode_item);
3851
3852 BTRFS_I(inode)->index_cnt = (u64)-1;
3853 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3854
3855 cache_index:
3856 /*
3857 * If we were modified in the current generation and evicted from memory
3858 * and then re-read we need to do a full sync since we don't have any
3859 * idea about which extents were modified before we were evicted from
3860 * cache.
3861 *
3862 * This is required for both inode re-read from disk and delayed inode
3863 * in delayed_nodes_tree.
3864 */
3865 if (BTRFS_I(inode)->last_trans == fs_info->generation)
3866 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3867 &BTRFS_I(inode)->runtime_flags);
3868
3869 /*
3870 * We don't persist the id of the transaction where an unlink operation
3871 * against the inode was last made. So here we assume the inode might
3872 	 * have been evicted, and therefore the exact value of last_unlink_trans was
3873 	 * lost, so we set it to last_trans to avoid metadata inconsistencies
3874 * between the inode and its parent if the inode is fsync'ed and the log
3875 * replayed. For example, in the scenario:
3876 *
3877 * touch mydir/foo
3878 * ln mydir/foo mydir/bar
3879 * sync
3880 * unlink mydir/bar
3881 * echo 2 > /proc/sys/vm/drop_caches # evicts inode
3882 * xfs_io -c fsync mydir/foo
3883 * <power failure>
3884 * mount fs, triggers fsync log replay
3885 *
3886 * We must make sure that when we fsync our inode foo we also log its
3887 * parent inode, otherwise after log replay the parent still has the
3888 * dentry with the "bar" name but our inode foo has a link count of 1
3889 * and doesn't have an inode ref with the name "bar" anymore.
3890 *
3891 * Setting last_unlink_trans to last_trans is a pessimistic approach,
3892 * but it guarantees correctness at the expense of occasional full
3893 * transaction commits on fsync if our inode is a directory, or if our
3894 * inode is not a directory, logging its parent unnecessarily.
3895 */
3896 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3897 /*
3898 	 * Similar reasoning applies to last_link_trans; it needs to be set,
3899 	 * as otherwise a case like the following:
3900 *
3901 * mkdir A
3902 * touch foo
3903 * ln foo A/bar
3904 * echo 2 > /proc/sys/vm/drop_caches
3905 * fsync foo
3906 * <power failure>
3907 *
3908 * Would result in link bar and directory A not existing after the power
3909 * failure.
3910 */
3911 BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
3912
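	/*
	 * Peek at the item that follows the inode item; for an inode with a
	 * single link this is its INODE_REF/EXTREF, which lets us cache the
	 * directory index (dir_index) without an extra lookup.
	 */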
3913 path->slots[0]++;
3914 if (inode->i_nlink != 1 ||
3915 path->slots[0] >= btrfs_header_nritems(leaf))
3916 goto cache_acl;
3917
3918 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3919 if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3920 goto cache_acl;
3921
3922 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3923 if (location.type == BTRFS_INODE_REF_KEY) {
3924 struct btrfs_inode_ref *ref;
3925
3926 ref = (struct btrfs_inode_ref *)ptr;
3927 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3928 } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3929 struct btrfs_inode_extref *extref;
3930
3931 extref = (struct btrfs_inode_extref *)ptr;
3932 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3933 extref);
3934 }
3935 cache_acl:
3936 /*
3937 * try to precache a NULL acl entry for files that don't have
3938 * any xattrs or acls
3939 */
3940 maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3941 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3942 if (first_xattr_slot != -1) {
3943 path->slots[0] = first_xattr_slot;
3944 ret = btrfs_load_inode_props(inode, path);
3945 if (ret)
3946 btrfs_err(fs_info,
3947 "error loading props for ino %llu (root %llu): %d",
3948 btrfs_ino(BTRFS_I(inode)),
3949 root->root_key.objectid, ret);
3950 }
3951 btrfs_free_path(path);
3952
3953 if (!maybe_acls)
3954 cache_no_acl(inode);
3955
3956 switch (inode->i_mode & S_IFMT) {
3957 case S_IFREG:
3958 inode->i_mapping->a_ops = &btrfs_aops;
3959 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3960 inode->i_fop = &btrfs_file_operations;
3961 inode->i_op = &btrfs_file_inode_operations;
3962 break;
3963 case S_IFDIR:
3964 inode->i_fop = &btrfs_dir_file_operations;
3965 inode->i_op = &btrfs_dir_inode_operations;
3966 break;
3967 case S_IFLNK:
3968 inode->i_op = &btrfs_symlink_inode_operations;
3969 inode_nohighmem(inode);
3970 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3971 break;
3972 default:
3973 inode->i_op = &btrfs_special_inode_operations;
3974 init_special_inode(inode, inode->i_mode, rdev);
3975 break;
3976 }
3977
3978 btrfs_update_iflags(inode);
3979 return 0;
3980
3981 make_bad:
3982 btrfs_free_path(path);
3983 make_bad_inode(inode);
3984 return ret;
3985 }
3986
3987 /*
3988 * given a leaf and an inode, copy the inode fields into the leaf
3989 */
3990 static void fill_inode_item(struct btrfs_trans_handle *trans,
3991 struct extent_buffer *leaf,
3992 struct btrfs_inode_item *item,
3993 struct inode *inode)
3994 {
3995 struct btrfs_map_token token;
3996
3997 btrfs_init_map_token(&token);
3998
3999 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
4000 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
4001 btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
4002 &token);
4003 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
4004 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
4005
4006 btrfs_set_token_timespec_sec(leaf, &item->atime,
4007 inode->i_atime.tv_sec, &token);
4008 btrfs_set_token_timespec_nsec(leaf, &item->atime,
4009 inode->i_atime.tv_nsec, &token);
4010
4011 btrfs_set_token_timespec_sec(leaf, &item->mtime,
4012 inode->i_mtime.tv_sec, &token);
4013 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
4014 inode->i_mtime.tv_nsec, &token);
4015
4016 btrfs_set_token_timespec_sec(leaf, &item->ctime,
4017 inode->i_ctime.tv_sec, &token);
4018 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
4019 inode->i_ctime.tv_nsec, &token);
4020
4021 btrfs_set_token_timespec_sec(leaf, &item->otime,
4022 BTRFS_I(inode)->i_otime.tv_sec, &token);
4023 btrfs_set_token_timespec_nsec(leaf, &item->otime,
4024 BTRFS_I(inode)->i_otime.tv_nsec, &token);
4025
4026 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
4027 &token);
4028 btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
4029 &token);
4030 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
4031 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
4032 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
4033 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
4034 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
4035 }
4036
4037 /*
4038 * copy everything in the in-memory inode into the btree.
4039 */
4040 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
4041 struct btrfs_root *root, struct inode *inode)
4042 {
4043 struct btrfs_inode_item *inode_item;
4044 struct btrfs_path *path;
4045 struct extent_buffer *leaf;
4046 int ret;
4047
4048 path = btrfs_alloc_path();
4049 if (!path)
4050 return -ENOMEM;
4051
4052 path->leave_spinning = 1;
4053 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
4054 1);
4055 if (ret) {
4056 if (ret > 0)
4057 ret = -ENOENT;
4058 goto failed;
4059 }
4060
4061 leaf = path->nodes[0];
4062 inode_item = btrfs_item_ptr(leaf, path->slots[0],
4063 struct btrfs_inode_item);
4064
4065 fill_inode_item(trans, leaf, inode_item, inode);
4066 btrfs_mark_buffer_dirty(leaf);
4067 btrfs_set_inode_last_trans(trans, inode);
4068 ret = 0;
4069 failed:
4070 btrfs_free_path(path);
4071 return ret;
4072 }
4073
4074 /*
4075 * copy everything in the in-memory inode into the btree.
4076 */
4077 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
4078 struct btrfs_root *root, struct inode *inode)
4079 {
4080 struct btrfs_fs_info *fs_info = root->fs_info;
4081 int ret;
4082
4083 /*
4084 * If the inode is a free space inode, we can deadlock during commit
4085 * if we put it into the delayed code.
4086 *
4087 * The data relocation inode should also be directly updated
4088 * without delay
4089 */
4090 if (!btrfs_is_free_space_inode(BTRFS_I(inode))
4091 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
4092 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4093 btrfs_update_root_times(trans, root);
4094
4095 ret = btrfs_delayed_update_inode(trans, root, inode);
4096 if (!ret)
4097 btrfs_set_inode_last_trans(trans, inode);
4098 return ret;
4099 }
4100
4101 return btrfs_update_inode_item(trans, root, inode);
4102 }
4103
4104 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4105 struct btrfs_root *root,
4106 struct inode *inode)
4107 {
4108 int ret;
4109
4110 ret = btrfs_update_inode(trans, root, inode);
4111 if (ret == -ENOSPC)
4112 return btrfs_update_inode_item(trans, root, inode);
4113 return ret;
4114 }
4115
4116 /*
4117 * unlink helper that gets used here in inode.c and in the tree logging
4118  * recovery code. It removes a link in a directory with a given name, and
4119 * also drops the back refs in the inode to the directory
4120 */
4121 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4122 struct btrfs_root *root,
4123 struct btrfs_inode *dir,
4124 struct btrfs_inode *inode,
4125 const char *name, int name_len)
4126 {
4127 struct btrfs_fs_info *fs_info = root->fs_info;
4128 struct btrfs_path *path;
4129 int ret = 0;
4130 struct extent_buffer *leaf;
4131 struct btrfs_dir_item *di;
4132 struct btrfs_key key;
4133 u64 index;
4134 u64 ino = btrfs_ino(inode);
4135 u64 dir_ino = btrfs_ino(dir);
4136
4137 path = btrfs_alloc_path();
4138 if (!path) {
4139 ret = -ENOMEM;
4140 goto out;
4141 }
4142
4143 path->leave_spinning = 1;
4144 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4145 name, name_len, -1);
4146 if (IS_ERR(di)) {
4147 ret = PTR_ERR(di);
4148 goto err;
4149 }
4150 if (!di) {
4151 ret = -ENOENT;
4152 goto err;
4153 }
4154 leaf = path->nodes[0];
4155 btrfs_dir_item_key_to_cpu(leaf, di, &key);
4156 ret = btrfs_delete_one_dir_name(trans, root, path, di);
4157 if (ret)
4158 goto err;
4159 btrfs_release_path(path);
4160
4161 /*
4162 	 * If we don't have the dir index cached, we have to get it by looking
4163 	 * up the inode ref; since we then have the inode ref at hand, remove it
4164 	 * directly instead of going through delayed deletion.
4165 	 *
4166 	 * But if we do have the dir index cached, there is no need to search
4167 	 * the inode ref to get it. Since the inode ref is close to the inode
4168 	 * item, it is better to delay its deletion and do it when we update
4169 	 * the inode item.
4170 */
4171 if (inode->dir_index) {
4172 ret = btrfs_delayed_delete_inode_ref(inode);
4173 if (!ret) {
4174 index = inode->dir_index;
4175 goto skip_backref;
4176 }
4177 }
4178
4179 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
4180 dir_ino, &index);
4181 if (ret) {
4182 btrfs_info(fs_info,
4183 "failed to delete reference to %.*s, inode %llu parent %llu",
4184 name_len, name, ino, dir_ino);
4185 btrfs_abort_transaction(trans, ret);
4186 goto err;
4187 }
4188 skip_backref:
4189 ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
4190 if (ret) {
4191 btrfs_abort_transaction(trans, ret);
4192 goto err;
4193 }
4194
4195 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
4196 dir_ino);
4197 if (ret != 0 && ret != -ENOENT) {
4198 btrfs_abort_transaction(trans, ret);
4199 goto err;
4200 }
4201
4202 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
4203 index);
4204 if (ret == -ENOENT)
4205 ret = 0;
4206 else if (ret)
4207 btrfs_abort_transaction(trans, ret);
4208 err:
4209 btrfs_free_path(path);
4210 if (ret)
4211 goto out;
4212
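	/*
	 * Each directory entry accounts name_len bytes twice in the directory's
	 * i_size (once for the dir item and once for the dir index item), hence
	 * the "name_len * 2" below.
	 */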
4213 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
4214 inode_inc_iversion(&inode->vfs_inode);
4215 inode_inc_iversion(&dir->vfs_inode);
4216 inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
4217 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
4218 ret = btrfs_update_inode(trans, root, &dir->vfs_inode);
4219 out:
4220 return ret;
4221 }
4222
4223 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4224 struct btrfs_root *root,
4225 struct btrfs_inode *dir, struct btrfs_inode *inode,
4226 const char *name, int name_len)
4227 {
4228 int ret;
4229 ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4230 if (!ret) {
4231 drop_nlink(&inode->vfs_inode);
4232 ret = btrfs_update_inode(trans, root, &inode->vfs_inode);
4233 }
4234 return ret;
4235 }
4236
4237 /*
4238 * helper to start transaction for unlink and rmdir.
4239 *
4240 * unlink and rmdir are special in btrfs, they do not always free space, so
4241 * if we cannot make our reservations the normal way try and see if there is
4242 * plenty of slack room in the global reserve to migrate, otherwise we cannot
4243 * allow the unlink to occur.
4244 */
4245 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4246 {
4247 struct btrfs_root *root = BTRFS_I(dir)->root;
4248
4249 /*
4250 * 1 for the possible orphan item
4251 * 1 for the dir item
4252 * 1 for the dir index
4253 * 1 for the inode ref
4254 * 1 for the inode
4255 */
4256 return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4257 }
4258
4259 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4260 {
4261 struct btrfs_root *root = BTRFS_I(dir)->root;
4262 struct btrfs_trans_handle *trans;
4263 struct inode *inode = d_inode(dentry);
4264 int ret;
4265
4266 trans = __unlink_start_trans(dir);
4267 if (IS_ERR(trans))
4268 return PTR_ERR(trans);
4269
4270 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4271 0);
4272
4273 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4274 BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4275 dentry->d_name.len);
4276 if (ret)
4277 goto out;
4278
4279 if (inode->i_nlink == 0) {
4280 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4281 if (ret)
4282 goto out;
4283 }
4284
4285 out:
4286 btrfs_end_transaction(trans);
4287 btrfs_btree_balance_dirty(root->fs_info);
4288 return ret;
4289 }
4290
4291 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4292 struct btrfs_root *root,
4293 struct inode *dir, u64 objectid,
4294 const char *name, int name_len)
4295 {
4296 struct btrfs_fs_info *fs_info = root->fs_info;
4297 struct btrfs_path *path;
4298 struct extent_buffer *leaf;
4299 struct btrfs_dir_item *di;
4300 struct btrfs_key key;
4301 u64 index;
4302 int ret;
4303 u64 dir_ino = btrfs_ino(BTRFS_I(dir));
4304
4305 path = btrfs_alloc_path();
4306 if (!path)
4307 return -ENOMEM;
4308
4309 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4310 name, name_len, -1);
4311 if (IS_ERR_OR_NULL(di)) {
4312 if (!di)
4313 ret = -ENOENT;
4314 else
4315 ret = PTR_ERR(di);
4316 goto out;
4317 }
4318
4319 leaf = path->nodes[0];
4320 btrfs_dir_item_key_to_cpu(leaf, di, &key);
4321 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4322 ret = btrfs_delete_one_dir_name(trans, root, path, di);
4323 if (ret) {
4324 btrfs_abort_transaction(trans, ret);
4325 goto out;
4326 }
4327 btrfs_release_path(path);
4328
4329 ret = btrfs_del_root_ref(trans, fs_info, objectid,
4330 root->root_key.objectid, dir_ino,
4331 &index, name, name_len);
4332 if (ret < 0) {
4333 if (ret != -ENOENT) {
4334 btrfs_abort_transaction(trans, ret);
4335 goto out;
4336 }
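		/*
		 * The root ref was not found; fall back to locating the dir
		 * index item by name so we still get the index needed to
		 * delete the delayed dir index entry below.
		 */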
4337 di = btrfs_search_dir_index_item(root, path, dir_ino,
4338 name, name_len);
4339 if (IS_ERR_OR_NULL(di)) {
4340 if (!di)
4341 ret = -ENOENT;
4342 else
4343 ret = PTR_ERR(di);
4344 btrfs_abort_transaction(trans, ret);
4345 goto out;
4346 }
4347
4348 leaf = path->nodes[0];
4349 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4350 btrfs_release_path(path);
4351 index = key.offset;
4352 }
4353 btrfs_release_path(path);
4354
4355 ret = btrfs_delete_delayed_dir_index(trans, fs_info, BTRFS_I(dir), index);
4356 if (ret) {
4357 btrfs_abort_transaction(trans, ret);
4358 goto out;
4359 }
4360
4361 btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
4362 inode_inc_iversion(dir);
4363 dir->i_mtime = dir->i_ctime = current_time(dir);
4364 ret = btrfs_update_inode_fallback(trans, root, dir);
4365 if (ret)
4366 btrfs_abort_transaction(trans, ret);
4367 out:
4368 btrfs_free_path(path);
4369 return ret;
4370 }
4371
4372 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4373 {
4374 struct inode *inode = d_inode(dentry);
4375 int err = 0;
4376 struct btrfs_root *root = BTRFS_I(dir)->root;
4377 struct btrfs_trans_handle *trans;
4378 u64 last_unlink_trans;
4379
4380 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4381 return -ENOTEMPTY;
4382 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
4383 return -EPERM;
4384
4385 trans = __unlink_start_trans(dir);
4386 if (IS_ERR(trans))
4387 return PTR_ERR(trans);
4388
4389 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4390 err = btrfs_unlink_subvol(trans, root, dir,
4391 BTRFS_I(inode)->location.objectid,
4392 dentry->d_name.name,
4393 dentry->d_name.len);
4394 goto out;
4395 }
4396
4397 err = btrfs_orphan_add(trans, BTRFS_I(inode));
4398 if (err)
4399 goto out;
4400
4401 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4402
4403 /* now the directory is empty */
4404 err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4405 BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4406 dentry->d_name.len);
4407 if (!err) {
4408 btrfs_i_size_write(BTRFS_I(inode), 0);
4409 /*
4410 * Propagate the last_unlink_trans value of the deleted dir to
4411 * its parent directory. This is to prevent an unrecoverable
4412 * log tree in the case we do something like this:
4413 * 1) create dir foo
4414 * 2) create snapshot under dir foo
4415 * 3) delete the snapshot
4416 * 4) rmdir foo
4417 * 5) mkdir foo
4418 * 6) fsync foo or some file inside foo
4419 */
4420 if (last_unlink_trans >= trans->transid)
4421 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4422 }
4423 out:
4424 btrfs_end_transaction(trans);
4425 btrfs_btree_balance_dirty(root->fs_info);
4426
4427 return err;
4428 }
4429
4430 static int truncate_space_check(struct btrfs_trans_handle *trans,
4431 struct btrfs_root *root,
4432 u64 bytes_deleted)
4433 {
4434 struct btrfs_fs_info *fs_info = root->fs_info;
4435 int ret;
4436
4437 /*
4438 * This is only used to apply pressure to the enospc system, we don't
4439 * intend to use this reservation at all.
4440 */
4441 bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
4442 bytes_deleted *= fs_info->nodesize;
4443 ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
4444 bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4445 if (!ret) {
4446 trace_btrfs_space_reservation(fs_info, "transaction",
4447 trans->transid,
4448 bytes_deleted, 1);
4449 trans->bytes_reserved += bytes_deleted;
4450 }
4451 return ret;
4452
4453 }
4454
4455 static int truncate_inline_extent(struct inode *inode,
4456 struct btrfs_path *path,
4457 struct btrfs_key *found_key,
4458 const u64 item_end,
4459 const u64 new_size)
4460 {
4461 struct extent_buffer *leaf = path->nodes[0];
4462 int slot = path->slots[0];
4463 struct btrfs_file_extent_item *fi;
4464 u32 size = (u32)(new_size - found_key->offset);
4465 struct btrfs_root *root = BTRFS_I(inode)->root;
4466
4467 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4468
4469 if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
4470 loff_t offset = new_size;
4471 loff_t page_end = ALIGN(offset, PAGE_SIZE);
4472
4473 /*
4474 		 * Zero out the remainder of the last page of our inline extent,
4475 * instead of directly truncating our inline extent here - that
4476 * would be much more complex (decompressing all the data, then
4477 * compressing the truncated data, which might be bigger than
4478 * the size of the inline extent, resize the extent, etc).
4479 * We release the path because to get the page we might need to
4480 * read the extent item from disk (data not in the page cache).
4481 */
4482 btrfs_release_path(path);
4483 return btrfs_truncate_block(inode, offset, page_end - offset,
4484 0);
4485 }
4486
4487 btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4488 size = btrfs_file_extent_calc_inline_size(size);
4489 btrfs_truncate_item(root->fs_info, path, size, 1);
4490
4491 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4492 inode_sub_bytes(inode, item_end + 1 - new_size);
4493
4494 return 0;
4495 }
4496
4497 /*
4498 * this can truncate away extent items, csum items and directory items.
4499 * It starts at a high offset and removes keys until it can't find
4500 * any higher than new_size
4501 *
4502 * csum items that cross the new i_size are truncated to the new size
4503 * as well.
4504 *
4505 * min_type is the minimum key type to truncate down to. If set to 0, this
4506 * will kill all the items on this inode, including the INODE_ITEM_KEY.
4507 */
4508 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4509 struct btrfs_root *root,
4510 struct inode *inode,
4511 u64 new_size, u32 min_type)
4512 {
4513 struct btrfs_fs_info *fs_info = root->fs_info;
4514 struct btrfs_path *path;
4515 struct extent_buffer *leaf;
4516 struct btrfs_file_extent_item *fi;
4517 struct btrfs_key key;
4518 struct btrfs_key found_key;
4519 u64 extent_start = 0;
4520 u64 extent_num_bytes = 0;
4521 u64 extent_offset = 0;
4522 u64 item_end = 0;
4523 u64 last_size = new_size;
4524 u32 found_type = (u8)-1;
4525 int found_extent;
4526 int del_item;
4527 int pending_del_nr = 0;
4528 int pending_del_slot = 0;
4529 int extent_type = -1;
4530 int ret;
4531 int err = 0;
4532 u64 ino = btrfs_ino(BTRFS_I(inode));
4533 u64 bytes_deleted = 0;
4534 bool be_nice = 0;
4535 bool should_throttle = 0;
4536 bool should_end = 0;
4537
4538 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4539
4540 /*
4541 * for non-free space inodes and ref cows, we want to back off from
4542 * time to time
4543 */
4544 if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
4545 test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4546 be_nice = 1;
4547
4548 path = btrfs_alloc_path();
4549 if (!path)
4550 return -ENOMEM;
4551 path->reada = READA_BACK;
4552
4553 /*
4554 * We want to drop from the next block forward in case this new size is
4555 * not block aligned since we will be keeping the last block of the
4556 * extent just the way it is.
4557 */
4558 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4559 root == fs_info->tree_root)
4560 btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
4561 fs_info->sectorsize),
4562 (u64)-1, 0);
4563
4564 /*
4565 * This function is also used to drop the items in the log tree before
4566 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4567 	 * it is used to drop the logged items. So we shouldn't kill the delayed
4568 * items.
4569 */
4570 if (min_type == 0 && root == BTRFS_I(inode)->root)
4571 btrfs_kill_delayed_inode_items(BTRFS_I(inode));
4572
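	/*
	 * Start from the largest possible key for this inode so the search
	 * lands on its last item and the loop below walks the items backwards.
	 */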
4573 key.objectid = ino;
4574 key.offset = (u64)-1;
4575 key.type = (u8)-1;
4576
4577 search_again:
4578 /*
4579 * with a 16K leaf size and 128MB extents, you can actually queue
4580 * up a huge file in a single leaf. Most of the time that
4581 * bytes_deleted is > 0, it will be huge by the time we get here
4582 */
4583 if (be_nice && bytes_deleted > SZ_32M) {
4584 if (btrfs_should_end_transaction(trans)) {
4585 err = -EAGAIN;
4586 goto error;
4587 }
4588 }
4589
4590
4591 path->leave_spinning = 1;
4592 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4593 if (ret < 0) {
4594 err = ret;
4595 goto out;
4596 }
4597
4598 if (ret > 0) {
4599 /* there are no items in the tree for us to truncate, we're
4600 * done
4601 */
4602 if (path->slots[0] == 0)
4603 goto out;
4604 path->slots[0]--;
4605 }
4606
4607 while (1) {
4608 fi = NULL;
4609 leaf = path->nodes[0];
4610 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4611 found_type = found_key.type;
4612
4613 if (found_key.objectid != ino)
4614 break;
4615
4616 if (found_type < min_type)
4617 break;
4618
4619 item_end = found_key.offset;
4620 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4621 fi = btrfs_item_ptr(leaf, path->slots[0],
4622 struct btrfs_file_extent_item);
4623 extent_type = btrfs_file_extent_type(leaf, fi);
4624 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4625 item_end +=
4626 btrfs_file_extent_num_bytes(leaf, fi);
4627
4628 trace_btrfs_truncate_show_fi_regular(
4629 BTRFS_I(inode), leaf, fi,
4630 found_key.offset);
4631 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4632 item_end += btrfs_file_extent_inline_len(leaf,
4633 path->slots[0], fi);
4634
4635 trace_btrfs_truncate_show_fi_inline(
4636 BTRFS_I(inode), leaf, fi, path->slots[0],
4637 found_key.offset);
4638 }
4639 item_end--;
4640 }
4641 if (found_type > min_type) {
4642 del_item = 1;
4643 } else {
4644 if (item_end < new_size)
4645 break;
4646 if (found_key.offset >= new_size)
4647 del_item = 1;
4648 else
4649 del_item = 0;
4650 }
4651 found_extent = 0;
4652 /* FIXME, shrink the extent if the ref count is only 1 */
4653 if (found_type != BTRFS_EXTENT_DATA_KEY)
4654 goto delete;
4655
4656 if (del_item)
4657 last_size = found_key.offset;
4658 else
4659 last_size = new_size;
4660
4661 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4662 u64 num_dec;
4663 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4664 if (!del_item) {
4665 u64 orig_num_bytes =
4666 btrfs_file_extent_num_bytes(leaf, fi);
4667 extent_num_bytes = ALIGN(new_size -
4668 found_key.offset,
4669 fs_info->sectorsize);
4670 btrfs_set_file_extent_num_bytes(leaf, fi,
4671 extent_num_bytes);
4672 num_dec = (orig_num_bytes -
4673 extent_num_bytes);
4674 if (test_bit(BTRFS_ROOT_REF_COWS,
4675 &root->state) &&
4676 extent_start != 0)
4677 inode_sub_bytes(inode, num_dec);
4678 btrfs_mark_buffer_dirty(leaf);
4679 } else {
4680 extent_num_bytes =
4681 btrfs_file_extent_disk_num_bytes(leaf,
4682 fi);
4683 extent_offset = found_key.offset -
4684 btrfs_file_extent_offset(leaf, fi);
4685
4686 /* FIXME blocksize != 4096 */
4687 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4688 if (extent_start != 0) {
4689 found_extent = 1;
4690 if (test_bit(BTRFS_ROOT_REF_COWS,
4691 &root->state))
4692 inode_sub_bytes(inode, num_dec);
4693 }
4694 }
4695 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4696 /*
4697 * we can't truncate inline items that have had
4698 * special encodings
4699 */
4700 if (!del_item &&
4701 btrfs_file_extent_encryption(leaf, fi) == 0 &&
4702 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4703
4704 /*
4705 * Need to release path in order to truncate a
4706 * compressed extent. So delete any accumulated
4707 * extent items so far.
4708 */
4709 if (btrfs_file_extent_compression(leaf, fi) !=
4710 BTRFS_COMPRESS_NONE && pending_del_nr) {
4711 err = btrfs_del_items(trans, root, path,
4712 pending_del_slot,
4713 pending_del_nr);
4714 if (err) {
4715 btrfs_abort_transaction(trans,
4716 err);
4717 goto error;
4718 }
4719 pending_del_nr = 0;
4720 }
4721
4722 err = truncate_inline_extent(inode, path,
4723 &found_key,
4724 item_end,
4725 new_size);
4726 if (err) {
4727 btrfs_abort_transaction(trans, err);
4728 goto error;
4729 }
4730 } else if (test_bit(BTRFS_ROOT_REF_COWS,
4731 &root->state)) {
4732 inode_sub_bytes(inode, item_end + 1 - new_size);
4733 }
4734 }
4735 delete:
4736 if (del_item) {
4737 if (!pending_del_nr) {
4738 /* no pending yet, add ourselves */
4739 pending_del_slot = path->slots[0];
4740 pending_del_nr = 1;
4741 } else if (pending_del_nr &&
4742 path->slots[0] + 1 == pending_del_slot) {
4743 /* hop on the pending chunk */
4744 pending_del_nr++;
4745 pending_del_slot = path->slots[0];
4746 } else {
4747 BUG();
4748 }
4749 } else {
4750 break;
4751 }
4752 should_throttle = 0;
4753
4754 if (found_extent &&
4755 (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4756 root == fs_info->tree_root)) {
4757 btrfs_set_path_blocking(path);
4758 bytes_deleted += extent_num_bytes;
4759 ret = btrfs_free_extent(trans, fs_info, extent_start,
4760 extent_num_bytes, 0,
4761 btrfs_header_owner(leaf),
4762 ino, extent_offset);
4763 if (ret) {
4764 btrfs_abort_transaction(trans, ret);
4765 break;
4766 }
4767 if (btrfs_should_throttle_delayed_refs(trans, fs_info))
4768 btrfs_async_run_delayed_refs(fs_info,
4769 trans->delayed_ref_updates * 2,
4770 trans->transid, 0);
4771 if (be_nice) {
4772 if (truncate_space_check(trans, root,
4773 extent_num_bytes)) {
4774 should_end = 1;
4775 }
4776 if (btrfs_should_throttle_delayed_refs(trans,
4777 fs_info))
4778 should_throttle = 1;
4779 }
4780 }
4781
4782 if (found_type == BTRFS_INODE_ITEM_KEY)
4783 break;
4784
4785 if (path->slots[0] == 0 ||
4786 path->slots[0] != pending_del_slot ||
4787 should_throttle || should_end) {
4788 if (pending_del_nr) {
4789 ret = btrfs_del_items(trans, root, path,
4790 pending_del_slot,
4791 pending_del_nr);
4792 if (ret) {
4793 btrfs_abort_transaction(trans, ret);
4794 goto error;
4795 }
4796 pending_del_nr = 0;
4797 }
4798 btrfs_release_path(path);
4799 if (should_throttle) {
4800 unsigned long updates = trans->delayed_ref_updates;
4801 if (updates) {
4802 trans->delayed_ref_updates = 0;
4803 ret = btrfs_run_delayed_refs(trans,
4804 fs_info,
4805 updates * 2);
4806 if (ret && !err)
4807 err = ret;
4808 }
4809 }
4810 /*
4811 * if we failed to refill our space rsv, bail out
4812 * and let the transaction restart
4813 */
4814 if (should_end) {
4815 err = -EAGAIN;
4816 goto error;
4817 }
4818 goto search_again;
4819 } else {
4820 path->slots[0]--;
4821 }
4822 }
4823 out:
4824 if (pending_del_nr) {
4825 ret = btrfs_del_items(trans, root, path, pending_del_slot,
4826 pending_del_nr);
4827 if (ret)
4828 btrfs_abort_transaction(trans, ret);
4829 }
4830 error:
4831 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4832 ASSERT(last_size >= new_size);
4833 if (!err && last_size > new_size)
4834 last_size = new_size;
4835 btrfs_ordered_update_i_size(inode, last_size, NULL);
4836 }
4837
4838 btrfs_free_path(path);
4839
4840 if (be_nice && bytes_deleted > SZ_32M) {
4841 unsigned long updates = trans->delayed_ref_updates;
4842 if (updates) {
4843 trans->delayed_ref_updates = 0;
4844 ret = btrfs_run_delayed_refs(trans, fs_info,
4845 updates * 2);
4846 if (ret && !err)
4847 err = ret;
4848 }
4849 }
4850 return err;
4851 }
4852
4853 /*
4854 * btrfs_truncate_block - read, zero a chunk and write a block
4855 * @inode - inode that we're zeroing
4856 * @from - the offset to start zeroing
4857  * @len - the length to zero, 0 to zero the entire range relative to the
4858 * offset
4859 * @front - zero up to the offset instead of from the offset on
4860 *
4861 * This will find the block for the "from" offset and cow the block and zero the
4862 * part we want to zero. This is used with truncate and hole punching.
4863 */
4864 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
4865 int front)
4866 {
4867 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4868 struct address_space *mapping = inode->i_mapping;
4869 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4870 struct btrfs_ordered_extent *ordered;
4871 struct extent_state *cached_state = NULL;
4872 struct extent_changeset *data_reserved = NULL;
4873 char *kaddr;
4874 u32 blocksize = fs_info->sectorsize;
4875 pgoff_t index = from >> PAGE_SHIFT;
4876 unsigned offset = from & (blocksize - 1);
4877 struct page *page;
4878 gfp_t mask = btrfs_alloc_write_mask(mapping);
4879 int ret = 0;
4880 u64 block_start;
4881 u64 block_end;
4882
4883 if ((offset & (blocksize - 1)) == 0 &&
4884 (!len || ((len & (blocksize - 1)) == 0)))
4885 goto out;
4886
4887 ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
4888 round_down(from, blocksize), blocksize);
4889 if (ret)
4890 goto out;
4891
4892 again:
4893 page = find_or_create_page(mapping, index, mask);
4894 if (!page) {
4895 btrfs_delalloc_release_space(inode, data_reserved,
4896 round_down(from, blocksize),
4897 blocksize);
4898 ret = -ENOMEM;
4899 goto out;
4900 }
4901
4902 block_start = round_down(from, blocksize);
4903 block_end = block_start + blocksize - 1;
4904
4905 if (!PageUptodate(page)) {
4906 ret = btrfs_readpage(NULL, page);
4907 lock_page(page);
4908 if (page->mapping != mapping) {
4909 unlock_page(page);
4910 put_page(page);
4911 goto again;
4912 }
4913 if (!PageUptodate(page)) {
4914 ret = -EIO;
4915 goto out_unlock;
4916 }
4917 }
4918 wait_on_page_writeback(page);
4919
4920 lock_extent_bits(io_tree, block_start, block_end, &cached_state);
4921 set_page_extent_mapped(page);
4922
4923 ordered = btrfs_lookup_ordered_extent(inode, block_start);
4924 if (ordered) {
4925 unlock_extent_cached(io_tree, block_start, block_end,
4926 &cached_state, GFP_NOFS);
4927 unlock_page(page);
4928 put_page(page);
4929 btrfs_start_ordered_extent(inode, ordered, 1);
4930 btrfs_put_ordered_extent(ordered);
4931 goto again;
4932 }
4933
4934 clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
4935 EXTENT_DIRTY | EXTENT_DELALLOC |
4936 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4937 0, 0, &cached_state, GFP_NOFS);
4938
4939 ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
4940 &cached_state, 0);
4941 if (ret) {
4942 unlock_extent_cached(io_tree, block_start, block_end,
4943 &cached_state, GFP_NOFS);
4944 goto out_unlock;
4945 }
4946
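	/*
	 * Zero the requested part of the block: everything before 'offset'
	 * when 'front' is set, otherwise 'len' bytes starting at 'offset'.
	 */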
4947 if (offset != blocksize) {
4948 if (!len)
4949 len = blocksize - offset;
4950 kaddr = kmap(page);
4951 if (front)
4952 memset(kaddr + (block_start - page_offset(page)),
4953 0, offset);
4954 else
4955 memset(kaddr + (block_start - page_offset(page)) + offset,
4956 0, len);
4957 flush_dcache_page(page);
4958 kunmap(page);
4959 }
4960 ClearPageChecked(page);
4961 set_page_dirty(page);
4962 unlock_extent_cached(io_tree, block_start, block_end, &cached_state,
4963 GFP_NOFS);
4964
4965 out_unlock:
4966 if (ret)
4967 btrfs_delalloc_release_space(inode, data_reserved, block_start,
4968 blocksize);
4969 unlock_page(page);
4970 put_page(page);
4971 out:
4972 extent_changeset_free(data_reserved);
4973 return ret;
4974 }
4975
4976 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4977 u64 offset, u64 len)
4978 {
4979 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4980 struct btrfs_trans_handle *trans;
4981 int ret;
4982
4983 /*
4984 * Still need to make sure the inode looks like it's been updated so
4985 * that any holes get logged if we fsync.
4986 */
4987 if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
4988 BTRFS_I(inode)->last_trans = fs_info->generation;
4989 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4990 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4991 return 0;
4992 }
4993
4994 /*
4995 * 1 - for the one we're dropping
4996 * 1 - for the one we're adding
4997 * 1 - for updating the inode.
4998 */
4999 trans = btrfs_start_transaction(root, 3);
5000 if (IS_ERR(trans))
5001 return PTR_ERR(trans);
5002
5003 ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
5004 if (ret) {
5005 btrfs_abort_transaction(trans, ret);
5006 btrfs_end_transaction(trans);
5007 return ret;
5008 }
5009
5010 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
5011 offset, 0, 0, len, 0, len, 0, 0, 0);
5012 if (ret)
5013 btrfs_abort_transaction(trans, ret);
5014 else
5015 btrfs_update_inode(trans, root, inode);
5016 btrfs_end_transaction(trans);
5017 return ret;
5018 }
5019
5020 /*
5021 * This function puts in dummy file extents for the area we're creating a hole
5022 * for. So if we are truncating this file to a larger size we need to insert
5023  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
5024 * the range between oldsize and size
5025 */
5026 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
5027 {
5028 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5029 struct btrfs_root *root = BTRFS_I(inode)->root;
5030 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5031 struct extent_map *em = NULL;
5032 struct extent_state *cached_state = NULL;
5033 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5034 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
5035 u64 block_end = ALIGN(size, fs_info->sectorsize);
5036 u64 last_byte;
5037 u64 cur_offset;
5038 u64 hole_size;
5039 int err = 0;
5040
5041 /*
5042 * If our size started in the middle of a block we need to zero out the
5043 * rest of the block before we expand the i_size, otherwise we could
5044 * expose stale data.
5045 */
5046 err = btrfs_truncate_block(inode, oldsize, 0, 0);
5047 if (err)
5048 return err;
5049
5050 if (size <= hole_start)
5051 return 0;
5052
5053 while (1) {
5054 struct btrfs_ordered_extent *ordered;
5055
5056 lock_extent_bits(io_tree, hole_start, block_end - 1,
5057 &cached_state);
5058 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start,
5059 block_end - hole_start);
5060 if (!ordered)
5061 break;
5062 unlock_extent_cached(io_tree, hole_start, block_end - 1,
5063 &cached_state, GFP_NOFS);
5064 btrfs_start_ordered_extent(inode, ordered, 1);
5065 btrfs_put_ordered_extent(ordered);
5066 }
5067
5068 cur_offset = hole_start;
5069 while (1) {
5070 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
5071 block_end - cur_offset, 0);
5072 if (IS_ERR(em)) {
5073 err = PTR_ERR(em);
5074 em = NULL;
5075 break;
5076 }
5077 last_byte = min(extent_map_end(em), block_end);
5078 last_byte = ALIGN(last_byte, fs_info->sectorsize);
5079 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
5080 struct extent_map *hole_em;
5081 hole_size = last_byte - cur_offset;
5082
5083 err = maybe_insert_hole(root, inode, cur_offset,
5084 hole_size);
5085 if (err)
5086 break;
5087 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
5088 cur_offset + hole_size - 1, 0);
5089 hole_em = alloc_extent_map();
5090 if (!hole_em) {
5091 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5092 &BTRFS_I(inode)->runtime_flags);
5093 goto next;
5094 }
5095 hole_em->start = cur_offset;
5096 hole_em->len = hole_size;
5097 hole_em->orig_start = cur_offset;
5098
5099 hole_em->block_start = EXTENT_MAP_HOLE;
5100 hole_em->block_len = 0;
5101 hole_em->orig_block_len = 0;
5102 hole_em->ram_bytes = hole_size;
5103 hole_em->bdev = fs_info->fs_devices->latest_bdev;
5104 hole_em->compress_type = BTRFS_COMPRESS_NONE;
5105 hole_em->generation = fs_info->generation;
5106
5107 while (1) {
5108 write_lock(&em_tree->lock);
5109 err = add_extent_mapping(em_tree, hole_em, 1);
5110 write_unlock(&em_tree->lock);
5111 if (err != -EEXIST)
5112 break;
5113 btrfs_drop_extent_cache(BTRFS_I(inode),
5114 cur_offset,
5115 cur_offset +
5116 hole_size - 1, 0);
5117 }
5118 free_extent_map(hole_em);
5119 }
5120 next:
5121 free_extent_map(em);
5122 em = NULL;
5123 cur_offset = last_byte;
5124 if (cur_offset >= block_end)
5125 break;
5126 }
5127 free_extent_map(em);
5128 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
5129 GFP_NOFS);
5130 return err;
5131 }
5132
5133 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5134 {
5135 struct btrfs_root *root = BTRFS_I(inode)->root;
5136 struct btrfs_trans_handle *trans;
5137 loff_t oldsize = i_size_read(inode);
5138 loff_t newsize = attr->ia_size;
5139 int mask = attr->ia_valid;
5140 int ret;
5141
5142 /*
5143 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5144 * special case where we need to update the times despite not having
5145 	 * these flags set. For all other operations the VFS sets these flags
5146 * explicitly if it wants a timestamp update.
5147 */
5148 if (newsize != oldsize) {
5149 inode_inc_iversion(inode);
5150 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
5151 inode->i_ctime = inode->i_mtime =
5152 current_time(inode);
5153 }
5154
5155 if (newsize > oldsize) {
5156 /*
5157 * Don't do an expanding truncate while snapshotting is ongoing.
5158 * This is to ensure the snapshot captures a fully consistent
5159 * state of this file - if the snapshot captures this expanding
5160 * truncation, it must capture all writes that happened before
5161 * this truncation.
5162 */
5163 btrfs_wait_for_snapshot_creation(root);
5164 ret = btrfs_cont_expand(inode, oldsize, newsize);
5165 if (ret) {
5166 btrfs_end_write_no_snapshotting(root);
5167 return ret;
5168 }
5169
5170 trans = btrfs_start_transaction(root, 1);
5171 if (IS_ERR(trans)) {
5172 btrfs_end_write_no_snapshotting(root);
5173 return PTR_ERR(trans);
5174 }
5175
5176 i_size_write(inode, newsize);
5177 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
5178 pagecache_isize_extended(inode, oldsize, newsize);
5179 ret = btrfs_update_inode(trans, root, inode);
5180 btrfs_end_write_no_snapshotting(root);
5181 btrfs_end_transaction(trans);
5182 } else {
5183
5184 /*
5185 * We're truncating a file that used to have good data down to
5186 * zero. Make sure it gets into the ordered flush list so that
5187 * any new writes get down to disk quickly.
5188 */
5189 if (newsize == 0)
5190 set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
5191 &BTRFS_I(inode)->runtime_flags);
5192
5193 /*
5194 * 1 for the orphan item we're going to add
5195 * 1 for the orphan item deletion.
5196 */
5197 trans = btrfs_start_transaction(root, 2);
5198 if (IS_ERR(trans))
5199 return PTR_ERR(trans);
5200
5201 /*
5202 * We need to do this in case we fail at _any_ point during the
5203 * actual truncate. Once we do the truncate_setsize we could
5204 * invalidate pages which forces any outstanding ordered io to
5205 * be instantly completed which will give us extents that need
5206 * to be truncated. If we fail to get an orphan inode down we
5207 * could have left over extents that were never meant to live,
5208 * so we need to guarantee from this point on that everything
5209 * will be consistent.
5210 */
5211 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
5212 btrfs_end_transaction(trans);
5213 if (ret)
5214 return ret;
5215
5216 /* we don't support swapfiles, so vmtruncate shouldn't fail */
5217 truncate_setsize(inode, newsize);
5218
5219 		/* Disable nonlocked read DIO to avoid an endless truncate */
5220 btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
5221 inode_dio_wait(inode);
5222 btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
5223
5224 ret = btrfs_truncate(inode);
5225 if (ret && inode->i_nlink) {
5226 int err;
5227
5228 /* To get a stable disk_i_size */
5229 err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5230 if (err) {
5231 btrfs_orphan_del(NULL, BTRFS_I(inode));
5232 return err;
5233 }
5234
5235 /*
5236 * failed to truncate, disk_i_size is only adjusted down
5237 * as we remove extents, so it should represent the true
5238 			 * size of the inode, so reset the in-memory size and
5239 * delete our orphan entry.
5240 */
5241 trans = btrfs_join_transaction(root);
5242 if (IS_ERR(trans)) {
5243 btrfs_orphan_del(NULL, BTRFS_I(inode));
5244 return ret;
5245 }
5246 i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5247 err = btrfs_orphan_del(trans, BTRFS_I(inode));
5248 if (err)
5249 btrfs_abort_transaction(trans, err);
5250 btrfs_end_transaction(trans);
5251 }
5252 }
5253
5254 return ret;
5255 }
5256
5257 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5258 {
5259 struct inode *inode = d_inode(dentry);
5260 struct btrfs_root *root = BTRFS_I(inode)->root;
5261 int err;
5262
5263 if (btrfs_root_readonly(root))
5264 return -EROFS;
5265
5266 err = setattr_prepare(dentry, attr);
5267 if (err)
5268 return err;
5269
5270 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5271 err = btrfs_setsize(inode, attr);
5272 if (err)
5273 return err;
5274 }
5275
5276 if (attr->ia_valid) {
5277 setattr_copy(inode, attr);
5278 inode_inc_iversion(inode);
5279 err = btrfs_dirty_inode(inode);
5280
5281 if (!err && attr->ia_valid & ATTR_MODE)
5282 err = posix_acl_chmod(inode, inode->i_mode);
5283 }
5284
5285 return err;
5286 }
5287
5288 /*
5289 * While truncating the inode pages during eviction, we get the VFS calling
5290 * btrfs_invalidatepage() against each page of the inode. This is slow because
5291  * the calls to btrfs_invalidatepage() result in a huge number of calls to
5292 * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5293 * extent_state structures over and over, wasting lots of time.
5294 *
5295 * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5296 * those expensive operations on a per page basis and do only the ordered io
5297 * finishing, while we release here the extent_map and extent_state structures,
5298 * without the excessive merging and splitting.
5299 */
5300 static void evict_inode_truncate_pages(struct inode *inode)
5301 {
5302 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5303 struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5304 struct rb_node *node;
5305
5306 ASSERT(inode->i_state & I_FREEING);
5307 truncate_inode_pages_final(&inode->i_data);
5308
5309 write_lock(&map_tree->lock);
5310 while (!RB_EMPTY_ROOT(&map_tree->map)) {
5311 struct extent_map *em;
5312
5313 node = rb_first(&map_tree->map);
5314 em = rb_entry(node, struct extent_map, rb_node);
5315 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5316 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5317 remove_extent_mapping(map_tree, em);
5318 free_extent_map(em);
5319 if (need_resched()) {
5320 write_unlock(&map_tree->lock);
5321 cond_resched();
5322 write_lock(&map_tree->lock);
5323 }
5324 }
5325 write_unlock(&map_tree->lock);
5326
5327 /*
5328 * Keep looping until we have no more ranges in the io tree.
5329 * We can have ongoing bios started by readpages (called from readahead)
5330 * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5331 * still in progress (unlocked the pages in the bio but did not yet
5332 	 * unlock the ranges in the io tree). Therefore this means some
5333 * ranges can still be locked and eviction started because before
5334 * submitting those bios, which are executed by a separate task (work
5335 * queue kthread), inode references (inode->i_count) were not taken
5336 * (which would be dropped in the end io callback of each bio).
5337 * Therefore here we effectively end up waiting for those bios and
5338 * anyone else holding locked ranges without having bumped the inode's
5339 * reference count - if we don't do it, when they access the inode's
5340 	 * io_tree to unlock a range it may be too late, leading to a
5341 	 * use-after-free issue.
5342 */
5343 spin_lock(&io_tree->lock);
5344 while (!RB_EMPTY_ROOT(&io_tree->state)) {
5345 struct extent_state *state;
5346 struct extent_state *cached_state = NULL;
5347 u64 start;
5348 u64 end;
5349 unsigned state_flags;
5350
5351 node = rb_first(&io_tree->state);
5352 state = rb_entry(node, struct extent_state, rb_node);
5353 start = state->start;
5354 end = state->end;
5355 state_flags = state->state;
5356 spin_unlock(&io_tree->lock);
5357
5358 lock_extent_bits(io_tree, start, end, &cached_state);
5359
5360 /*
5361 * If still has DELALLOC flag, the extent didn't reach disk,
5362 * and its reserved space won't be freed by delayed_ref.
5363 * So we need to free its reserved space here.
5364 * (Refer to comment in btrfs_invalidatepage, case 2)
5365 *
5366 * Note, end is the bytenr of last byte, so we need + 1 here.
5367 */
5368 if (state_flags & EXTENT_DELALLOC)
5369 btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
5370
5371 clear_extent_bit(io_tree, start, end,
5372 EXTENT_LOCKED | EXTENT_DIRTY |
5373 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5374 EXTENT_DEFRAG, 1, 1,
5375 &cached_state, GFP_NOFS);
5376
5377 cond_resched();
5378 spin_lock(&io_tree->lock);
5379 }
5380 spin_unlock(&io_tree->lock);
5381 }
5382
5383 void btrfs_evict_inode(struct inode *inode)
5384 {
5385 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5386 struct btrfs_trans_handle *trans;
5387 struct btrfs_root *root = BTRFS_I(inode)->root;
5388 struct btrfs_block_rsv *rsv, *global_rsv;
5389 int steal_from_global = 0;
5390 u64 min_size;
5391 int ret;
5392
5393 trace_btrfs_inode_evict(inode);
5394
5395 if (!root) {
5396 clear_inode(inode);
5397 return;
5398 }
5399
5400 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
5401
5402 evict_inode_truncate_pages(inode);
5403
5404 if (inode->i_nlink &&
5405 ((btrfs_root_refs(&root->root_item) != 0 &&
5406 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5407 btrfs_is_free_space_inode(BTRFS_I(inode))))
5408 goto no_delete;
5409
5410 if (is_bad_inode(inode)) {
5411 btrfs_orphan_del(NULL, BTRFS_I(inode));
5412 goto no_delete;
5413 }
5414 /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5415 if (!special_file(inode->i_mode))
5416 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5417
5418 btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
5419
5420 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
5421 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
5422 &BTRFS_I(inode)->runtime_flags));
5423 goto no_delete;
5424 }
5425
5426 if (inode->i_nlink > 0) {
5427 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5428 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5429 goto no_delete;
5430 }
5431
5432 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5433 if (ret) {
5434 btrfs_orphan_del(NULL, BTRFS_I(inode));
5435 goto no_delete;
5436 }
5437
5438 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5439 if (!rsv) {
5440 btrfs_orphan_del(NULL, BTRFS_I(inode));
5441 goto no_delete;
5442 }
5443 rsv->size = min_size;
5444 rsv->failfast = 1;
5445 global_rsv = &fs_info->global_block_rsv;
5446
5447 btrfs_i_size_write(BTRFS_I(inode), 0);
5448
5449 /*
5450 * This is a bit simpler than btrfs_truncate since we've already
5451 * reserved our space for our orphan item in the unlink, so we just
5452 * need to reserve some slack space in case we add bytes and update
5453 * inode item when doing the truncate.
5454 */
5455 while (1) {
5456 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5457 BTRFS_RESERVE_FLUSH_LIMIT);
5458
5459 /*
5460 * Try and steal from the global reserve since we will
5461 * likely not use this space anyway, we want to try as
5462 * hard as possible to get this to work.
5463 */
5464 if (ret)
5465 steal_from_global++;
5466 else
5467 steal_from_global = 0;
5468 ret = 0;
5469
5470 /*
5471 * steal_from_global == 0: we reserved stuff, hooray!
5472 * steal_from_global == 1: we didn't reserve stuff, boo!
5473 * steal_from_global == 2: we've committed, still not a lot of
5474 * room but maybe we'll have room in the global reserve this
5475 * time.
5476 * steal_from_global == 3: abandon all hope!
5477 */
5478 if (steal_from_global > 2) {
5479 btrfs_warn(fs_info,
5480 "Could not get space for a delete, will truncate on mount %d",
5481 ret);
5482 btrfs_orphan_del(NULL, BTRFS_I(inode));
5483 btrfs_free_block_rsv(fs_info, rsv);
5484 goto no_delete;
5485 }
5486
5487 trans = btrfs_join_transaction(root);
5488 if (IS_ERR(trans)) {
5489 btrfs_orphan_del(NULL, BTRFS_I(inode));
5490 btrfs_free_block_rsv(fs_info, rsv);
5491 goto no_delete;
5492 }
5493
5494 /*
5495 * We can't just steal from the global reserve, we need to make
5496 * sure there is room to do it, if not we need to commit and try
5497 * again.
5498 */
5499 if (steal_from_global) {
5500 if (!btrfs_check_space_for_delayed_refs(trans, fs_info))
5501 ret = btrfs_block_rsv_migrate(global_rsv, rsv,
5502 min_size, 0);
5503 else
5504 ret = -ENOSPC;
5505 }
5506
5507 /*
5508 * Couldn't steal from the global reserve, we have too much
5509 * pending stuff built up, commit the transaction and try it
5510 * again.
5511 */
5512 if (ret) {
5513 ret = btrfs_commit_transaction(trans);
5514 if (ret) {
5515 btrfs_orphan_del(NULL, BTRFS_I(inode));
5516 btrfs_free_block_rsv(fs_info, rsv);
5517 goto no_delete;
5518 }
5519 continue;
5520 } else {
5521 steal_from_global = 0;
5522 }
5523
5524 trans->block_rsv = rsv;
5525
5526 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5527 if (ret) {
5528 trans->block_rsv = &fs_info->trans_block_rsv;
5529 btrfs_end_transaction(trans);
5530 btrfs_btree_balance_dirty(fs_info);
5531 if (ret != -ENOSPC && ret != -EAGAIN) {
5532 btrfs_orphan_del(NULL, BTRFS_I(inode));
5533 btrfs_free_block_rsv(fs_info, rsv);
5534 goto no_delete;
5535 }
5536 } else {
5537 break;
5538 }
5539 }
5540
5541 btrfs_free_block_rsv(fs_info, rsv);
5542
5543 /*
5544 * Errors here aren't a big deal, it just means we leave orphan items
5545 * in the tree. They will be cleaned up on the next mount.
5546 */
5547 trans->block_rsv = root->orphan_block_rsv;
5548 btrfs_orphan_del(trans, BTRFS_I(inode));
5549
5550 trans->block_rsv = &fs_info->trans_block_rsv;
5551 if (!(root == fs_info->tree_root ||
5552 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5553 btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode)));
5554
5555 btrfs_end_transaction(trans);
5556 btrfs_btree_balance_dirty(fs_info);
5557 no_delete:
5558 btrfs_remove_delayed_node(BTRFS_I(inode));
5559 clear_inode(inode);
5560 }
5561
5562 /*
5563 * this returns the key found in the dir entry in the location pointer.
5564 * If no dir entries were found, location->objectid is 0.
5565 */
5566 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5567 struct btrfs_key *location)
5568 {
5569 const char *name = dentry->d_name.name;
5570 int namelen = dentry->d_name.len;
5571 struct btrfs_dir_item *di;
5572 struct btrfs_path *path;
5573 struct btrfs_root *root = BTRFS_I(dir)->root;
5574 int ret = 0;
5575
5576 path = btrfs_alloc_path();
5577 if (!path)
5578 return -ENOMEM;
5579
5580 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
5581 name, namelen, 0);
5582 if (IS_ERR(di))
5583 ret = PTR_ERR(di);
5584
5585 if (IS_ERR_OR_NULL(di))
5586 goto out_err;
5587
5588 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5589 if (location->type != BTRFS_INODE_ITEM_KEY &&
5590 location->type != BTRFS_ROOT_ITEM_KEY) {
5591 btrfs_warn(root->fs_info,
5592 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5593 __func__, name, btrfs_ino(BTRFS_I(dir)),
5594 location->objectid, location->type, location->offset);
5595 goto out_err;
5596 }
5597 out:
5598 btrfs_free_path(path);
5599 return ret;
5600 out_err:
5601 location->objectid = 0;
5602 goto out;
5603 }
5604
5605 /*
5606 * when we hit a tree root in a directory, the btrfs part of the inode
5607 * needs to be changed to reflect the root directory of the tree root. This
5608 * is kind of like crossing a mount point.
5609 */
5610 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5611 struct inode *dir,
5612 struct dentry *dentry,
5613 struct btrfs_key *location,
5614 struct btrfs_root **sub_root)
5615 {
5616 struct btrfs_path *path;
5617 struct btrfs_root *new_root;
5618 struct btrfs_root_ref *ref;
5619 struct extent_buffer *leaf;
5620 struct btrfs_key key;
5621 int ret;
5622 int err = 0;
5623
5624 path = btrfs_alloc_path();
5625 if (!path) {
5626 err = -ENOMEM;
5627 goto out;
5628 }
5629
5630 err = -ENOENT;
5631 key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5632 key.type = BTRFS_ROOT_REF_KEY;
5633 key.offset = location->objectid;
5634
5635 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5636 if (ret) {
5637 if (ret < 0)
5638 err = ret;
5639 goto out;
5640 }
5641
5642 leaf = path->nodes[0];
5643 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5644 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
5645 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5646 goto out;
5647
5648 ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5649 (unsigned long)(ref + 1),
5650 dentry->d_name.len);
5651 if (ret)
5652 goto out;
5653
5654 btrfs_release_path(path);
5655
5656 new_root = btrfs_read_fs_root_no_name(fs_info, location);
5657 if (IS_ERR(new_root)) {
5658 err = PTR_ERR(new_root);
5659 goto out;
5660 }
5661
5662 *sub_root = new_root;
5663 location->objectid = btrfs_root_dirid(&new_root->root_item);
5664 location->type = BTRFS_INODE_ITEM_KEY;
5665 location->offset = 0;
5666 err = 0;
5667 out:
5668 btrfs_free_path(path);
5669 return err;
5670 }
5671
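/*
 * Add the in-memory inode to the per-root red-black tree, keyed by inode
 * number. If a node with the same ino is already present it belongs to an
 * inode that is being freed (I_WILL_FREE/I_FREEING), so replace it in place.
 */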
5672 static void inode_tree_add(struct inode *inode)
5673 {
5674 struct btrfs_root *root = BTRFS_I(inode)->root;
5675 struct btrfs_inode *entry;
5676 struct rb_node **p;
5677 struct rb_node *parent;
5678 struct rb_node *new = &BTRFS_I(inode)->rb_node;
5679 u64 ino = btrfs_ino(BTRFS_I(inode));
5680
5681 if (inode_unhashed(inode))
5682 return;
5683 parent = NULL;
5684 spin_lock(&root->inode_lock);
5685 p = &root->inode_tree.rb_node;
5686 while (*p) {
5687 parent = *p;
5688 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5689
5690 if (ino < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5691 p = &parent->rb_left;
5692 else if (ino > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5693 p = &parent->rb_right;
5694 else {
5695 WARN_ON(!(entry->vfs_inode.i_state &
5696 (I_WILL_FREE | I_FREEING)));
5697 rb_replace_node(parent, new, &root->inode_tree);
5698 RB_CLEAR_NODE(parent);
5699 spin_unlock(&root->inode_lock);
5700 return;
5701 }
5702 }
5703 rb_link_node(new, parent, p);
5704 rb_insert_color(new, &root->inode_tree);
5705 spin_unlock(&root->inode_lock);
5706 }
5707
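/*
 * Remove the inode from the per-root red-black tree. If that leaves the
 * tree empty and the root has no remaining references, wait for concurrent
 * subvolume lookups to finish and queue the root for deletion.
 */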
5708 static void inode_tree_del(struct inode *inode)
5709 {
5710 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5711 struct btrfs_root *root = BTRFS_I(inode)->root;
5712 int empty = 0;
5713
5714 spin_lock(&root->inode_lock);
5715 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5716 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5717 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5718 empty = RB_EMPTY_ROOT(&root->inode_tree);
5719 }
5720 spin_unlock(&root->inode_lock);
5721
5722 if (empty && btrfs_root_refs(&root->root_item) == 0) {
5723 synchronize_srcu(&fs_info->subvol_srcu);
5724 spin_lock(&root->inode_lock);
5725 empty = RB_EMPTY_ROOT(&root->inode_tree);
5726 spin_unlock(&root->inode_lock);
5727 if (empty)
5728 btrfs_add_dead_root(root);
5729 }
5730 }
5731
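/*
 * Walk the per-root red-black tree in inode-number order and drop every
 * in-memory inode we can still grab: prune dentry aliases first so the
 * final iput() can actually evict the inode. The rbtree lock is dropped
 * and the walk restarted around each iput().
 */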
5732 void btrfs_invalidate_inodes(struct btrfs_root *root)
5733 {
5734 struct btrfs_fs_info *fs_info = root->fs_info;
5735 struct rb_node *node;
5736 struct rb_node *prev;
5737 struct btrfs_inode *entry;
5738 struct inode *inode;
5739 u64 objectid = 0;
5740
5741 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
5742 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
5743
5744 spin_lock(&root->inode_lock);
5745 again:
5746 node = root->inode_tree.rb_node;
5747 prev = NULL;
5748 while (node) {
5749 prev = node;
5750 entry = rb_entry(node, struct btrfs_inode, rb_node);
5751
5752 if (objectid < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5753 node = node->rb_left;
5754 else if (objectid > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5755 node = node->rb_right;
5756 else
5757 break;
5758 }
5759 if (!node) {
5760 while (prev) {
5761 entry = rb_entry(prev, struct btrfs_inode, rb_node);
5762 if (objectid <= btrfs_ino(BTRFS_I(&entry->vfs_inode))) {
5763 node = prev;
5764 break;
5765 }
5766 prev = rb_next(prev);
5767 }
5768 }
5769 while (node) {
5770 entry = rb_entry(node, struct btrfs_inode, rb_node);
5771 objectid = btrfs_ino(BTRFS_I(&entry->vfs_inode)) + 1;
5772 inode = igrab(&entry->vfs_inode);
5773 if (inode) {
5774 spin_unlock(&root->inode_lock);
5775 if (atomic_read(&inode->i_count) > 1)
5776 d_prune_aliases(inode);
5777 /*
5778 * btrfs_drop_inode will have it removed from
5779 * the inode cache when its usage count
5780 * hits zero.
5781 */
5782 iput(inode);
5783 cond_resched();
5784 spin_lock(&root->inode_lock);
5785 goto again;
5786 }
5787
5788 if (cond_resched_lock(&root->inode_lock))
5789 goto again;
5790
5791 node = rb_next(node);
5792 }
5793 spin_unlock(&root->inode_lock);
5794 }
5795
5796 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5797 {
5798 struct btrfs_iget_args *args = p;
5799 inode->i_ino = args->location->objectid;
5800 memcpy(&BTRFS_I(inode)->location, args->location,
5801 sizeof(*args->location));
5802 BTRFS_I(inode)->root = args->root;
5803 return 0;
5804 }
5805
5806 static int btrfs_find_actor(struct inode *inode, void *opaque)
5807 {
5808 struct btrfs_iget_args *args = opaque;
5809 return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5810 args->root == BTRFS_I(inode)->root;
5811 }
5812
5813 static struct inode *btrfs_iget_locked(struct super_block *s,
5814 struct btrfs_key *location,
5815 struct btrfs_root *root)
5816 {
5817 struct inode *inode;
5818 struct btrfs_iget_args args;
5819 unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5820
5821 args.location = location;
5822 args.root = root;
5823
5824 inode = iget5_locked(s, hashval, btrfs_find_actor,
5825 btrfs_init_locked_inode,
5826 (void *)&args);
5827 return inode;
5828 }
5829
5830 /* Get an inode object given its location and corresponding root.
5831 * Returns in *new whether the inode was read from disk.
5832 */
5833 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5834 struct btrfs_root *root, int *new)
5835 {
5836 struct inode *inode;
5837
5838 inode = btrfs_iget_locked(s, location, root);
5839 if (!inode)
5840 return ERR_PTR(-ENOMEM);
5841
5842 if (inode->i_state & I_NEW) {
5843 int ret;
5844
5845 ret = btrfs_read_locked_inode(inode);
5846 if (!is_bad_inode(inode)) {
5847 inode_tree_add(inode);
5848 unlock_new_inode(inode);
5849 if (new)
5850 *new = 1;
5851 } else {
5852 unlock_new_inode(inode);
5853 iput(inode);
5854 ASSERT(ret < 0);
5855 inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
5856 }
5857 }
5858
5859 return inode;
5860 }
5861
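/*
 * Build a dummy read-only directory inode for a subvolume reference whose
 * root could not be found (fixup_tree_root_location() returned -ENOENT in
 * btrfs_lookup_dentry()). It only exists in memory and is flagged
 * BTRFS_INODE_DUMMY.
 */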
5862 static struct inode *new_simple_dir(struct super_block *s,
5863 struct btrfs_key *key,
5864 struct btrfs_root *root)
5865 {
5866 struct inode *inode = new_inode(s);
5867
5868 if (!inode)
5869 return ERR_PTR(-ENOMEM);
5870
5871 BTRFS_I(inode)->root = root;
5872 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5873 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5874
5875 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5876 inode->i_op = &btrfs_dir_ro_inode_operations;
5877 inode->i_opflags &= ~IOP_XATTR;
5878 inode->i_fop = &simple_dir_operations;
5879 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5880 inode->i_mtime = current_time(inode);
5881 inode->i_atime = inode->i_mtime;
5882 inode->i_ctime = inode->i_mtime;
5883 BTRFS_I(inode)->i_otime = inode->i_mtime;
5884
5885 return inode;
5886 }
5887
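/*
 * Core of ->lookup: resolve the name to a key via the directory item. If
 * the key is a BTRFS_ROOT_ITEM_KEY we are crossing into another subvolume,
 * so remap the key onto that subvolume's root directory (or hand back a
 * dummy directory if the root reference is dangling) and, on a writable
 * mount, run orphan cleanup on the subvolume we just crossed into.
 */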
5888 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5889 {
5890 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5891 struct inode *inode;
5892 struct btrfs_root *root = BTRFS_I(dir)->root;
5893 struct btrfs_root *sub_root = root;
5894 struct btrfs_key location;
5895 int index;
5896 int ret = 0;
5897
5898 if (dentry->d_name.len > BTRFS_NAME_LEN)
5899 return ERR_PTR(-ENAMETOOLONG);
5900
5901 ret = btrfs_inode_by_name(dir, dentry, &location);
5902 if (ret < 0)
5903 return ERR_PTR(ret);
5904
5905 if (location.objectid == 0)
5906 return ERR_PTR(-ENOENT);
5907
5908 if (location.type == BTRFS_INODE_ITEM_KEY) {
5909 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5910 return inode;
5911 }
5912
5913 index = srcu_read_lock(&fs_info->subvol_srcu);
5914 ret = fixup_tree_root_location(fs_info, dir, dentry,
5915 &location, &sub_root);
5916 if (ret < 0) {
5917 if (ret != -ENOENT)
5918 inode = ERR_PTR(ret);
5919 else
5920 inode = new_simple_dir(dir->i_sb, &location, sub_root);
5921 } else {
5922 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5923 }
5924 srcu_read_unlock(&fs_info->subvol_srcu, index);
5925
5926 if (!IS_ERR(inode) && root != sub_root) {
5927 down_read(&fs_info->cleanup_work_sem);
5928 if (!sb_rdonly(inode->i_sb))
5929 ret = btrfs_orphan_cleanup(sub_root);
5930 up_read(&fs_info->cleanup_work_sem);
5931 if (ret) {
5932 iput(inode);
5933 inode = ERR_PTR(ret);
5934 }
5935 }
5936
5937 return inode;
5938 }
5939
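/*
 * Tell the dcache not to keep dentries that belong to a deleted subvolume
 * (root refcount of zero) or to the dummy empty-subvolume directory inode.
 */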
5940 static int btrfs_dentry_delete(const struct dentry *dentry)
5941 {
5942 struct btrfs_root *root;
5943 struct inode *inode = d_inode(dentry);
5944
5945 if (!inode && !IS_ROOT(dentry))
5946 inode = d_inode(dentry->d_parent);
5947
5948 if (inode) {
5949 root = BTRFS_I(inode)->root;
5950 if (btrfs_root_refs(&root->root_item) == 0)
5951 return 1;
5952
5953 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5954 return 1;
5955 }
5956 return 0;
5957 }
5958
5959 static void btrfs_dentry_release(struct dentry *dentry)
5960 {
5961 kfree(dentry->d_fsdata);
5962 }
5963
5964 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5965 unsigned int flags)
5966 {
5967 struct inode *inode;
5968
5969 inode = btrfs_lookup_dentry(dir, dentry);
5970 if (IS_ERR(inode)) {
5971 if (PTR_ERR(inode) == -ENOENT)
5972 inode = NULL;
5973 else
5974 return ERR_CAST(inode);
5975 }
5976
5977 return d_splice_alias(inode, dentry);
5978 }
5979
5980 unsigned char btrfs_filetype_table[] = {
5981 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5982 };
5983
5984 /*
5985 * All this infrastructure exists because dir_emit can fault, and we are holding
5986 * the tree lock when doing readdir. For now just allocate a buffer and copy
5987 * our information into that, and then dir_emit from the buffer. This is
5988 * similar to what NFS does, only we don't keep the buffer around in pagecache
5989 * because I'm afraid I'll mess that up. Long term we need to make filldir do
5990 * copy_to_user_inatomic so we don't have to worry about page faulting under the
5991 * tree lock.
5992 */
5993 static int btrfs_opendir(struct inode *inode, struct file *file)
5994 {
5995 struct btrfs_file_private *private;
5996
5997 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5998 if (!private)
5999 return -ENOMEM;
6000 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
6001 if (!private->filldir_buf) {
6002 kfree(private);
6003 return -ENOMEM;
6004 }
6005 file->private_data = private;
6006 return 0;
6007 }
6008
6009 struct dir_entry {
6010 u64 ino;
6011 u64 offset;
6012 unsigned type;
6013 int name_len;
6014 };
6015
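/*
 * Drain the private filldir buffer: each packed entry is a struct dir_entry
 * immediately followed by name_len bytes of name. Returns 1 when dir_emit()
 * reports that the caller's buffer is full (leaving ctx->pos at the entry
 * that still has to be emitted), 0 once everything was emitted.
 */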
6016 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
6017 {
6018 while (entries--) {
6019 struct dir_entry *entry = addr;
6020 char *name = (char *)(entry + 1);
6021
6022 ctx->pos = get_unaligned(&entry->offset);
6023 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
6024 get_unaligned(&entry->ino),
6025 get_unaligned(&entry->type)))
6026 return 1;
6027 addr += sizeof(struct dir_entry) +
6028 get_unaligned(&entry->name_len);
6029 ctx->pos++;
6030 }
6031 return 0;
6032 }
6033
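/*
 * readdir: walk the DIR_INDEX items starting at ctx->pos, skip entries that
 * are scheduled for deletion in the delayed items, pack the rest into the
 * per-file buffer and emit them via btrfs_filldir(), then append the
 * delayed (not yet committed) directory entries.
 */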
6034 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
6035 {
6036 struct inode *inode = file_inode(file);
6037 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6038 struct btrfs_root *root = BTRFS_I(inode)->root;
6039 struct btrfs_file_private *private = file->private_data;
6040 struct btrfs_dir_item *di;
6041 struct btrfs_key key;
6042 struct btrfs_key found_key;
6043 struct btrfs_path *path;
6044 void *addr;
6045 struct list_head ins_list;
6046 struct list_head del_list;
6047 int ret;
6048 struct extent_buffer *leaf;
6049 int slot;
6050 char *name_ptr;
6051 int name_len;
6052 int entries = 0;
6053 int total_len = 0;
6054 bool put = false;
6055 struct btrfs_key location;
6056
6057 if (!dir_emit_dots(file, ctx))
6058 return 0;
6059
6060 path = btrfs_alloc_path();
6061 if (!path)
6062 return -ENOMEM;
6063
6064 addr = private->filldir_buf;
6065 path->reada = READA_FORWARD;
6066
6067 INIT_LIST_HEAD(&ins_list);
6068 INIT_LIST_HEAD(&del_list);
6069 put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
6070
6071 again:
6072 key.type = BTRFS_DIR_INDEX_KEY;
6073 key.offset = ctx->pos;
6074 key.objectid = btrfs_ino(BTRFS_I(inode));
6075
6076 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6077 if (ret < 0)
6078 goto err;
6079
6080 while (1) {
6081 struct dir_entry *entry;
6082
6083 leaf = path->nodes[0];
6084 slot = path->slots[0];
6085 if (slot >= btrfs_header_nritems(leaf)) {
6086 ret = btrfs_next_leaf(root, path);
6087 if (ret < 0)
6088 goto err;
6089 else if (ret > 0)
6090 break;
6091 continue;
6092 }
6093
6094 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6095
6096 if (found_key.objectid != key.objectid)
6097 break;
6098 if (found_key.type != BTRFS_DIR_INDEX_KEY)
6099 break;
6100 if (found_key.offset < ctx->pos)
6101 goto next;
6102 if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
6103 goto next;
6104 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
6105 if (verify_dir_item(fs_info, leaf, slot, di))
6106 goto next;
6107
6108 name_len = btrfs_dir_name_len(leaf, di);
6109 if ((total_len + sizeof(struct dir_entry) + name_len) >=
6110 PAGE_SIZE) {
6111 btrfs_release_path(path);
6112 ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6113 if (ret)
6114 goto nopos;
6115 addr = private->filldir_buf;
6116 entries = 0;
6117 total_len = 0;
6118 goto again;
6119 }
6120
6121 entry = addr;
6122 put_unaligned(name_len, &entry->name_len);
6123 name_ptr = (char *)(entry + 1);
6124 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
6125 name_len);
6126 put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)],
6127 &entry->type);
6128 btrfs_dir_item_key_to_cpu(leaf, di, &location);
6129 put_unaligned(location.objectid, &entry->ino);
6130 put_unaligned(found_key.offset, &entry->offset);
6131 entries++;
6132 addr += sizeof(struct dir_entry) + name_len;
6133 total_len += sizeof(struct dir_entry) + name_len;
6134 next:
6135 path->slots[0]++;
6136 }
6137 btrfs_release_path(path);
6138
6139 ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6140 if (ret)
6141 goto nopos;
6142
6143 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
6144 if (ret)
6145 goto nopos;
6146
6147 /*
6148 * Stop new entries from being returned after we return the last
6149 * entry.
6150 *
6151 * New directory entries are assigned a strictly increasing
6152 * offset. This means that new entries created during readdir
6153 * are *guaranteed* to be seen in the future by that readdir.
6154 * This has broken buggy programs which operate on names as
6155 * they're returned by readdir. Until we re-use freed offsets
6156 * we have this hack to stop new entries from being returned
6157 * under the assumption that they'll never reach this huge
6158 * offset.
6159 *
6160 * This is being careful not to overflow 32bit loff_t unless the
6161 * last entry requires it because doing so has broken 32bit apps
6162 * in the past.
6163 */
6164 if (ctx->pos >= INT_MAX)
6165 ctx->pos = LLONG_MAX;
6166 else
6167 ctx->pos = INT_MAX;
6168 nopos:
6169 ret = 0;
6170 err:
6171 if (put)
6172 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
6173 btrfs_free_path(path);
6174 return ret;
6175 }
6176
6177 /*
6178 * This is somewhat expensive, updating the tree every time the
6179 * inode changes. But, it is most likely to find the inode in cache.
6180 * FIXME, needs more benchmarking...there are no reasons other than performance
6181 * to keep or drop this code.
6182 */
6183 static int btrfs_dirty_inode(struct inode *inode)
6184 {
6185 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6186 struct btrfs_root *root = BTRFS_I(inode)->root;
6187 struct btrfs_trans_handle *trans;
6188 int ret;
6189
6190 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
6191 return 0;
6192
6193 trans = btrfs_join_transaction(root);
6194 if (IS_ERR(trans))
6195 return PTR_ERR(trans);
6196
6197 ret = btrfs_update_inode(trans, root, inode);
6198 if (ret && ret == -ENOSPC) {
6199 /* whoops, lets try again with the full transaction */
6200 btrfs_end_transaction(trans);
6201 trans = btrfs_start_transaction(root, 1);
6202 if (IS_ERR(trans))
6203 return PTR_ERR(trans);
6204
6205 ret = btrfs_update_inode(trans, root, inode);
6206 }
6207 btrfs_end_transaction(trans);
6208 if (BTRFS_I(inode)->delayed_node)
6209 btrfs_balance_delayed_items(fs_info);
6210
6211 return ret;
6212 }
6213
6214 /*
6215 * This is a copy of file_update_time. We need this so we can return an error
6216 * on ENOSPC when updating the inode for file writes and mmap writes.
6217 */
6218 static int btrfs_update_time(struct inode *inode, struct timespec *now,
6219 int flags)
6220 {
6221 struct btrfs_root *root = BTRFS_I(inode)->root;
6222
6223 if (btrfs_root_readonly(root))
6224 return -EROFS;
6225
6226 if (flags & S_VERSION)
6227 inode_inc_iversion(inode);
6228 if (flags & S_CTIME)
6229 inode->i_ctime = *now;
6230 if (flags & S_MTIME)
6231 inode->i_mtime = *now;
6232 if (flags & S_ATIME)
6233 inode->i_atime = *now;
6234 return btrfs_dirty_inode(inode);
6235 }
6236
6237 /*
6238 * find the highest existing sequence number in a directory
6239 * and then set the in-memory index_cnt variable to the first
6240 * free sequence number
6241 */
6242 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6243 {
6244 struct btrfs_root *root = inode->root;
6245 struct btrfs_key key, found_key;
6246 struct btrfs_path *path;
6247 struct extent_buffer *leaf;
6248 int ret;
6249
6250 key.objectid = btrfs_ino(inode);
6251 key.type = BTRFS_DIR_INDEX_KEY;
6252 key.offset = (u64)-1;
6253
6254 path = btrfs_alloc_path();
6255 if (!path)
6256 return -ENOMEM;
6257
6258 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6259 if (ret < 0)
6260 goto out;
6261 /* FIXME: we should be able to handle this */
6262 if (ret == 0)
6263 goto out;
6264 ret = 0;
6265
6266 /*
6267 * MAGIC NUMBER EXPLANATION:
6268 * since we search a directory based on f_pos, and '.' and '..' have
6269 * f_pos of 0 and 1 respectively, everybody else has to start at
6270 * index 2
6271 */
6272 if (path->slots[0] == 0) {
6273 inode->index_cnt = 2;
6274 goto out;
6275 }
6276
6277 path->slots[0]--;
6278
6279 leaf = path->nodes[0];
6280 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6281
6282 if (found_key.objectid != btrfs_ino(inode) ||
6283 found_key.type != BTRFS_DIR_INDEX_KEY) {
6284 inode->index_cnt = 2;
6285 goto out;
6286 }
6287
6288 inode->index_cnt = found_key.offset + 1;
6289 out:
6290 btrfs_free_path(path);
6291 return ret;
6292 }
6293
6294 /*
6295 * helper to find a free sequence number in a given directory. The current
6296 * code is very simple; later versions will do smarter things in the btree
6297 */
6298 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6299 {
6300 int ret = 0;
6301
6302 if (dir->index_cnt == (u64)-1) {
6303 ret = btrfs_inode_delayed_dir_index_count(dir);
6304 if (ret) {
6305 ret = btrfs_set_inode_index_count(dir);
6306 if (ret)
6307 return ret;
6308 }
6309 }
6310
6311 *index = dir->index_cnt;
6312 dir->index_cnt++;
6313
6314 return ret;
6315 }
6316
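/*
 * Hash the new, still locked inode into the VFS inode cache, using the same
 * hash and comparison helpers as btrfs_iget_locked().
 */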
6317 static int btrfs_insert_inode_locked(struct inode *inode)
6318 {
6319 struct btrfs_iget_args args;
6320 args.location = &BTRFS_I(inode)->location;
6321 args.root = BTRFS_I(inode)->root;
6322
6323 return insert_inode_locked4(inode,
6324 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6325 btrfs_find_actor, &args);
6326 }
6327
6328 /*
6329 * Inherit flags from the parent inode.
6330 *
6331 * Currently only the compression flags and the cow flags are inherited.
6332 */
6333 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
6334 {
6335 unsigned int flags;
6336
6337 if (!dir)
6338 return;
6339
6340 flags = BTRFS_I(dir)->flags;
6341
6342 if (flags & BTRFS_INODE_NOCOMPRESS) {
6343 BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
6344 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
6345 } else if (flags & BTRFS_INODE_COMPRESS) {
6346 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
6347 BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
6348 }
6349
6350 if (flags & BTRFS_INODE_NODATACOW) {
6351 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
6352 if (S_ISREG(inode->i_mode))
6353 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6354 }
6355
6356 btrfs_update_iflags(inode);
6357 }
6358
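/*
 * Create a new in-memory inode and insert its inode item (plus, when a name
 * is given, the first inode ref) into the fs tree with a single
 * btrfs_insert_empty_items() call. A NULL name means an orphan inode in the
 * O_TMPFILE style, which starts with a link count of 0.
 */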
6359 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6360 struct btrfs_root *root,
6361 struct inode *dir,
6362 const char *name, int name_len,
6363 u64 ref_objectid, u64 objectid,
6364 umode_t mode, u64 *index)
6365 {
6366 struct btrfs_fs_info *fs_info = root->fs_info;
6367 struct inode *inode;
6368 struct btrfs_inode_item *inode_item;
6369 struct btrfs_key *location;
6370 struct btrfs_path *path;
6371 struct btrfs_inode_ref *ref;
6372 struct btrfs_key key[2];
6373 u32 sizes[2];
6374 int nitems = name ? 2 : 1;
6375 unsigned long ptr;
6376 int ret;
6377
6378 path = btrfs_alloc_path();
6379 if (!path)
6380 return ERR_PTR(-ENOMEM);
6381
6382 inode = new_inode(fs_info->sb);
6383 if (!inode) {
6384 btrfs_free_path(path);
6385 return ERR_PTR(-ENOMEM);
6386 }
6387
6388 /*
6389 * For O_TMPFILE, set the link count to 0 so that the inode item
6390 * we fill in below carries the correct link count.
6391 */
6392 if (!name)
6393 set_nlink(inode, 0);
6394
6395 /*
6396 * we have to initialize this early, so we can reclaim the inode
6397 * number if we fail afterwards in this function.
6398 */
6399 inode->i_ino = objectid;
6400
6401 if (dir && name) {
6402 trace_btrfs_inode_request(dir);
6403
6404 ret = btrfs_set_inode_index(BTRFS_I(dir), index);
6405 if (ret) {
6406 btrfs_free_path(path);
6407 iput(inode);
6408 return ERR_PTR(ret);
6409 }
6410 } else if (dir) {
6411 *index = 0;
6412 }
6413 /*
6414 * index_cnt is ignored for everything but a dir,
6415 * btrfs_set_inode_index_count has an explanation for the magic
6416 * number
6417 */
6418 BTRFS_I(inode)->index_cnt = 2;
6419 BTRFS_I(inode)->dir_index = *index;
6420 BTRFS_I(inode)->root = root;
6421 BTRFS_I(inode)->generation = trans->transid;
6422 inode->i_generation = BTRFS_I(inode)->generation;
6423
6424 /*
6425 * We could have gotten an inode number from somebody who was fsynced
6426 * and then removed in this same transaction, so let's just set full
6427 * sync since it will be a full sync anyway and this will blow away the
6428 * old info in the log.
6429 */
6430 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6431
6432 key[0].objectid = objectid;
6433 key[0].type = BTRFS_INODE_ITEM_KEY;
6434 key[0].offset = 0;
6435
6436 sizes[0] = sizeof(struct btrfs_inode_item);
6437
6438 if (name) {
6439 /*
6440 * Start new inodes with an inode_ref. This is slightly more
6441 * efficient for small numbers of hard links since they will
6442 * be packed into one item. Extended refs will kick in if we
6443 * add more hard links than can fit in the ref item.
6444 */
6445 key[1].objectid = objectid;
6446 key[1].type = BTRFS_INODE_REF_KEY;
6447 key[1].offset = ref_objectid;
6448
6449 sizes[1] = name_len + sizeof(*ref);
6450 }
6451
6452 location = &BTRFS_I(inode)->location;
6453 location->objectid = objectid;
6454 location->offset = 0;
6455 location->type = BTRFS_INODE_ITEM_KEY;
6456
6457 ret = btrfs_insert_inode_locked(inode);
6458 if (ret < 0)
6459 goto fail;
6460
6461 path->leave_spinning = 1;
6462 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6463 if (ret != 0)
6464 goto fail_unlock;
6465
6466 inode_init_owner(inode, dir, mode);
6467 inode_set_bytes(inode, 0);
6468
6469 inode->i_mtime = current_time(inode);
6470 inode->i_atime = inode->i_mtime;
6471 inode->i_ctime = inode->i_mtime;
6472 BTRFS_I(inode)->i_otime = inode->i_mtime;
6473
6474 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6475 struct btrfs_inode_item);
6476 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6477 sizeof(*inode_item));
6478 fill_inode_item(trans, path->nodes[0], inode_item, inode);
6479
6480 if (name) {
6481 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6482 struct btrfs_inode_ref);
6483 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6484 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6485 ptr = (unsigned long)(ref + 1);
6486 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6487 }
6488
6489 btrfs_mark_buffer_dirty(path->nodes[0]);
6490 btrfs_free_path(path);
6491
6492 btrfs_inherit_iflags(inode, dir);
6493
6494 if (S_ISREG(mode)) {
6495 if (btrfs_test_opt(fs_info, NODATASUM))
6496 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6497 if (btrfs_test_opt(fs_info, NODATACOW))
6498 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6499 BTRFS_INODE_NODATASUM;
6500 }
6501
6502 inode_tree_add(inode);
6503
6504 trace_btrfs_inode_new(inode);
6505 btrfs_set_inode_last_trans(trans, inode);
6506
6507 btrfs_update_root_times(trans, root);
6508
6509 ret = btrfs_inode_inherit_props(trans, inode, dir);
6510 if (ret)
6511 btrfs_err(fs_info,
6512 "error inheriting props for ino %llu (root %llu): %d",
6513 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret);
6514
6515 return inode;
6516
6517 fail_unlock:
6518 unlock_new_inode(inode);
6519 fail:
6520 if (dir && name)
6521 BTRFS_I(dir)->index_cnt--;
6522 btrfs_free_path(path);
6523 iput(inode);
6524 return ERR_PTR(ret);
6525 }
6526
6527 static inline u8 btrfs_inode_type(struct inode *inode)
6528 {
6529 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
6530 }
6531
6532 /*
6533 * utility function to add 'inode' into 'parent_inode' with
6534 * a given name and a given sequence number.
6535 * if 'add_backref' is true, also insert a backref from the
6536 * inode to the parent directory.
6537 */
6538 int btrfs_add_link(struct btrfs_trans_handle *trans,
6539 struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6540 const char *name, int name_len, int add_backref, u64 index)
6541 {
6542 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
6543 int ret = 0;
6544 struct btrfs_key key;
6545 struct btrfs_root *root = parent_inode->root;
6546 u64 ino = btrfs_ino(inode);
6547 u64 parent_ino = btrfs_ino(parent_inode);
6548
6549 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6550 memcpy(&key, &inode->root->root_key, sizeof(key));
6551 } else {
6552 key.objectid = ino;
6553 key.type = BTRFS_INODE_ITEM_KEY;
6554 key.offset = 0;
6555 }
6556
6557 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6558 ret = btrfs_add_root_ref(trans, fs_info, key.objectid,
6559 root->root_key.objectid, parent_ino,
6560 index, name, name_len);
6561 } else if (add_backref) {
6562 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6563 parent_ino, index);
6564 }
6565
6566 /* Nothing to clean up yet */
6567 if (ret)
6568 return ret;
6569
6570 ret = btrfs_insert_dir_item(trans, root, name, name_len,
6571 parent_inode, &key,
6572 btrfs_inode_type(&inode->vfs_inode), index);
6573 if (ret == -EEXIST || ret == -EOVERFLOW)
6574 goto fail_dir_item;
6575 else if (ret) {
6576 btrfs_abort_transaction(trans, ret);
6577 return ret;
6578 }
6579
6580 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6581 name_len * 2);
6582 inode_inc_iversion(&parent_inode->vfs_inode);
6583 parent_inode->vfs_inode.i_mtime = parent_inode->vfs_inode.i_ctime =
6584 current_time(&parent_inode->vfs_inode);
6585 ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode);
6586 if (ret)
6587 btrfs_abort_transaction(trans, ret);
6588 return ret;
6589
6590 fail_dir_item:
6591 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6592 u64 local_index;
6593 int err;
6594 err = btrfs_del_root_ref(trans, fs_info, key.objectid,
6595 root->root_key.objectid, parent_ino,
6596 &local_index, name, name_len);
6597 if (err)
6598 btrfs_abort_transaction(trans, err);
6599 } else if (add_backref) {
6600 u64 local_index;
6601 int err;
6602
6603 err = btrfs_del_inode_ref(trans, root, name, name_len,
6604 ino, parent_ino, &local_index);
6605 if (err)
6606 btrfs_abort_transaction(trans, err);
6607 }
6608
6609 /* Return the original error code */
6610 return ret;
6611 }
6612
6613 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6614 struct btrfs_inode *dir, struct dentry *dentry,
6615 struct btrfs_inode *inode, int backref, u64 index)
6616 {
6617 int err = btrfs_add_link(trans, dir, inode,
6618 dentry->d_name.name, dentry->d_name.len,
6619 backref, index);
6620 if (err > 0)
6621 err = -EEXIST;
6622 return err;
6623 }
6624
6625 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6626 umode_t mode, dev_t rdev)
6627 {
6628 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6629 struct btrfs_trans_handle *trans;
6630 struct btrfs_root *root = BTRFS_I(dir)->root;
6631 struct inode *inode = NULL;
6632 int err;
6633 int drop_inode = 0;
6634 u64 objectid;
6635 u64 index = 0;
6636
6637 /*
6638 * 2 for inode item and ref
6639 * 2 for dir items
6640 * 1 for xattr if selinux is on
6641 */
6642 trans = btrfs_start_transaction(root, 5);
6643 if (IS_ERR(trans))
6644 return PTR_ERR(trans);
6645
6646 err = btrfs_find_free_ino(root, &objectid);
6647 if (err)
6648 goto out_unlock;
6649
6650 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6651 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6652 mode, &index);
6653 if (IS_ERR(inode)) {
6654 err = PTR_ERR(inode);
6655 goto out_unlock;
6656 }
6657
6658 /*
6659 * If the active LSM wants to access the inode during
6660 * d_instantiate it needs these. Smack checks to see
6661 * if the filesystem supports xattrs by looking at the
6662 * ops vector.
6663 */
6664 inode->i_op = &btrfs_special_inode_operations;
6665 init_special_inode(inode, inode->i_mode, rdev);
6666
6667 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6668 if (err)
6669 goto out_unlock_inode;
6670
6671 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6672 0, index);
6673 if (err) {
6674 goto out_unlock_inode;
6675 } else {
6676 btrfs_update_inode(trans, root, inode);
6677 d_instantiate_new(dentry, inode);
6678 }
6679
6680 out_unlock:
6681 btrfs_end_transaction(trans);
6682 btrfs_balance_delayed_items(fs_info);
6683 btrfs_btree_balance_dirty(fs_info);
6684 if (drop_inode) {
6685 inode_dec_link_count(inode);
6686 iput(inode);
6687 }
6688 return err;
6689
6690 out_unlock_inode:
6691 drop_inode = 1;
6692 unlock_new_inode(inode);
6693 goto out_unlock;
6694
6695 }
6696
6697 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6698 umode_t mode, bool excl)
6699 {
6700 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6701 struct btrfs_trans_handle *trans;
6702 struct btrfs_root *root = BTRFS_I(dir)->root;
6703 struct inode *inode = NULL;
6704 int drop_inode_on_err = 0;
6705 int err;
6706 u64 objectid;
6707 u64 index = 0;
6708
6709 /*
6710 * 2 for inode item and ref
6711 * 2 for dir items
6712 * 1 for xattr if selinux is on
6713 */
6714 trans = btrfs_start_transaction(root, 5);
6715 if (IS_ERR(trans))
6716 return PTR_ERR(trans);
6717
6718 err = btrfs_find_free_ino(root, &objectid);
6719 if (err)
6720 goto out_unlock;
6721
6722 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6723 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6724 mode, &index);
6725 if (IS_ERR(inode)) {
6726 err = PTR_ERR(inode);
6727 goto out_unlock;
6728 }
6729 drop_inode_on_err = 1;
6730 /*
6731 * If the active LSM wants to access the inode during
6732 * d_instantiate it needs these. Smack checks to see
6733 * if the filesystem supports xattrs by looking at the
6734 * ops vector.
6735 */
6736 inode->i_fop = &btrfs_file_operations;
6737 inode->i_op = &btrfs_file_inode_operations;
6738 inode->i_mapping->a_ops = &btrfs_aops;
6739
6740 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6741 if (err)
6742 goto out_unlock_inode;
6743
6744 err = btrfs_update_inode(trans, root, inode);
6745 if (err)
6746 goto out_unlock_inode;
6747
6748 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6749 0, index);
6750 if (err)
6751 goto out_unlock_inode;
6752
6753 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6754 d_instantiate_new(dentry, inode);
6755
6756 out_unlock:
6757 btrfs_end_transaction(trans);
6758 if (err && drop_inode_on_err) {
6759 inode_dec_link_count(inode);
6760 iput(inode);
6761 }
6762 btrfs_balance_delayed_items(fs_info);
6763 btrfs_btree_balance_dirty(fs_info);
6764 return err;
6765
6766 out_unlock_inode:
6767 unlock_new_inode(inode);
6768 goto out_unlock;
6769
6770 }
6771
6772 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6773 struct dentry *dentry)
6774 {
6775 struct btrfs_trans_handle *trans = NULL;
6776 struct btrfs_root *root = BTRFS_I(dir)->root;
6777 struct inode *inode = d_inode(old_dentry);
6778 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6779 u64 index;
6780 int err;
6781 int drop_inode = 0;
6782
6783 /* do not allow sys_link's with other subvols of the same device */
6784 if (root->objectid != BTRFS_I(inode)->root->objectid)
6785 return -EXDEV;
6786
6787 if (inode->i_nlink >= BTRFS_LINK_MAX)
6788 return -EMLINK;
6789
6790 err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6791 if (err)
6792 goto fail;
6793
6794 /*
6795 * 2 items for inode and inode ref
6796 * 2 items for dir items
6797 * 1 item for parent inode
6798 */
6799 trans = btrfs_start_transaction(root, 5);
6800 if (IS_ERR(trans)) {
6801 err = PTR_ERR(trans);
6802 trans = NULL;
6803 goto fail;
6804 }
6805
6806 /* There are several dir indexes for this inode, clear the cache. */
6807 BTRFS_I(inode)->dir_index = 0ULL;
6808 inc_nlink(inode);
6809 inode_inc_iversion(inode);
6810 inode->i_ctime = current_time(inode);
6811 ihold(inode);
6812 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6813
6814 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6815 1, index);
6816
6817 if (err) {
6818 drop_inode = 1;
6819 } else {
6820 struct dentry *parent = dentry->d_parent;
6821 err = btrfs_update_inode(trans, root, inode);
6822 if (err)
6823 goto fail;
6824 if (inode->i_nlink == 1) {
6825 /*
6826 * If the new hard link count is 1, it's a file created
6827 * with open(2)'s O_TMPFILE flag.
6828 */
6829 err = btrfs_orphan_del(trans, BTRFS_I(inode));
6830 if (err)
6831 goto fail;
6832 }
6833 BTRFS_I(inode)->last_link_trans = trans->transid;
6834 d_instantiate(dentry, inode);
6835 btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
6836 }
6837
6838 btrfs_balance_delayed_items(fs_info);
6839 fail:
6840 if (trans)
6841 btrfs_end_transaction(trans);
6842 if (drop_inode) {
6843 inode_dec_link_count(inode);
6844 iput(inode);
6845 }
6846 btrfs_btree_balance_dirty(fs_info);
6847 return err;
6848 }
6849
6850 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6851 {
6852 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6853 struct inode *inode = NULL;
6854 struct btrfs_trans_handle *trans;
6855 struct btrfs_root *root = BTRFS_I(dir)->root;
6856 int err = 0;
6857 int drop_on_err = 0;
6858 u64 objectid = 0;
6859 u64 index = 0;
6860
6861 /*
6862 * 2 items for inode and ref
6863 * 2 items for dir items
6864 * 1 for xattr if selinux is on
6865 */
6866 trans = btrfs_start_transaction(root, 5);
6867 if (IS_ERR(trans))
6868 return PTR_ERR(trans);
6869
6870 err = btrfs_find_free_ino(root, &objectid);
6871 if (err)
6872 goto out_fail;
6873
6874 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6875 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6876 S_IFDIR | mode, &index);
6877 if (IS_ERR(inode)) {
6878 err = PTR_ERR(inode);
6879 goto out_fail;
6880 }
6881
6882 drop_on_err = 1;
6883 /* these must be set before we unlock the inode */
6884 inode->i_op = &btrfs_dir_inode_operations;
6885 inode->i_fop = &btrfs_dir_file_operations;
6886
6887 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6888 if (err)
6889 goto out_fail_inode;
6890
6891 btrfs_i_size_write(BTRFS_I(inode), 0);
6892 err = btrfs_update_inode(trans, root, inode);
6893 if (err)
6894 goto out_fail_inode;
6895
6896 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6897 dentry->d_name.name,
6898 dentry->d_name.len, 0, index);
6899 if (err)
6900 goto out_fail_inode;
6901
6902 d_instantiate_new(dentry, inode);
6903 drop_on_err = 0;
6904
6905 out_fail:
6906 btrfs_end_transaction(trans);
6907 if (drop_on_err) {
6908 inode_dec_link_count(inode);
6909 iput(inode);
6910 }
6911 btrfs_balance_delayed_items(fs_info);
6912 btrfs_btree_balance_dirty(fs_info);
6913 return err;
6914
6915 out_fail_inode:
6916 unlock_new_inode(inode);
6917 goto out_fail;
6918 }
6919
6920 /* Find next extent map of a given extent map, caller needs to ensure locks */
6921 static struct extent_map *next_extent_map(struct extent_map *em)
6922 {
6923 struct rb_node *next;
6924
6925 next = rb_next(&em->rb_node);
6926 if (!next)
6927 return NULL;
6928 return container_of(next, struct extent_map, rb_node);
6929 }
6930
6931 static struct extent_map *prev_extent_map(struct extent_map *em)
6932 {
6933 struct rb_node *prev;
6934
6935 prev = rb_prev(&em->rb_node);
6936 if (!prev)
6937 return NULL;
6938 return container_of(prev, struct extent_map, rb_node);
6939 }
6940
6941 /* helper for btrfs_get_extent. Given an existing extent in the tree
6942 * (the nearest extent to map_start) and an extent that you want to
6943 * insert, deal with the overlap and insert the best-fitting new
6944 * extent into the tree.
6945 */
6946 static int merge_extent_mapping(struct extent_map_tree *em_tree,
6947 struct extent_map *existing,
6948 struct extent_map *em,
6949 u64 map_start)
6950 {
6951 struct extent_map *prev;
6952 struct extent_map *next;
6953 u64 start;
6954 u64 end;
6955 u64 start_diff;
6956
6957 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
6958
6959 if (existing->start > map_start) {
6960 next = existing;
6961 prev = prev_extent_map(next);
6962 } else {
6963 prev = existing;
6964 next = next_extent_map(prev);
6965 }
6966
6967 start = prev ? extent_map_end(prev) : em->start;
6968 start = max_t(u64, start, em->start);
6969 end = next ? next->start : extent_map_end(em);
6970 end = min_t(u64, end, extent_map_end(em));
6971 start_diff = start - em->start;
6972 em->start = start;
6973 em->len = end - start;
6974 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
6975 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
6976 em->block_start += start_diff;
6977 em->block_len -= start_diff;
6978 }
6979 return add_extent_mapping(em_tree, em, 0);
6980 }
6981
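/*
 * Read a compressed inline extent out of the leaf and decompress it
 * directly into @page, zeroing whatever part of the page lies beyond the
 * decompressed data.
 */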
6982 static noinline int uncompress_inline(struct btrfs_path *path,
6983 struct page *page,
6984 size_t pg_offset, u64 extent_offset,
6985 struct btrfs_file_extent_item *item)
6986 {
6987 int ret;
6988 struct extent_buffer *leaf = path->nodes[0];
6989 char *tmp;
6990 size_t max_size;
6991 unsigned long inline_size;
6992 unsigned long ptr;
6993 int compress_type;
6994
6995 WARN_ON(pg_offset != 0);
6996 compress_type = btrfs_file_extent_compression(leaf, item);
6997 max_size = btrfs_file_extent_ram_bytes(leaf, item);
6998 inline_size = btrfs_file_extent_inline_item_len(leaf,
6999 btrfs_item_nr(path->slots[0]));
7000 tmp = kmalloc(inline_size, GFP_NOFS);
7001 if (!tmp)
7002 return -ENOMEM;
7003 ptr = btrfs_file_extent_inline_start(item);
7004
7005 read_extent_buffer(leaf, tmp, ptr, inline_size);
7006
7007 max_size = min_t(unsigned long, PAGE_SIZE, max_size);
7008 ret = btrfs_decompress(compress_type, tmp, page,
7009 extent_offset, inline_size, max_size);
7010
7011 /*
7012 * decompression code contains a memset to fill in any space between the end
7013 * of the uncompressed data and the end of max_size in case the decompressed
7014 * data ends up shorter than ram_bytes. That doesn't cover the hole between
7015 * the end of an inline extent and the beginning of the next block, so we
7016 * cover that region here.
7017 */
7018
7019 if (max_size + pg_offset < PAGE_SIZE) {
7020 char *map = kmap(page);
7021 memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
7022 kunmap(page);
7023 }
7024 kfree(tmp);
7025 return ret;
7026 }
7027
7028 /*
7029 * a bit scary, this does extent mapping from logical file offset to the disk.
7030 * the ugly parts come from merging extents from the disk with the in-ram
7031 * representation. This gets more complex because of the data=ordered code,
7032 * where the in-ram extents might be locked pending data=ordered completion.
7033 *
7034 * This also copies inline extents directly into the page.
7035 */
7036 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
7037 struct page *page,
7038 size_t pg_offset, u64 start, u64 len,
7039 int create)
7040 {
7041 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
7042 int ret;
7043 int err = 0;
7044 u64 extent_start = 0;
7045 u64 extent_end = 0;
7046 u64 objectid = btrfs_ino(inode);
7047 u32 found_type;
7048 struct btrfs_path *path = NULL;
7049 struct btrfs_root *root = inode->root;
7050 struct btrfs_file_extent_item *item;
7051 struct extent_buffer *leaf;
7052 struct btrfs_key found_key;
7053 struct extent_map *em = NULL;
7054 struct extent_map_tree *em_tree = &inode->extent_tree;
7055 struct extent_io_tree *io_tree = &inode->io_tree;
7056 struct btrfs_trans_handle *trans = NULL;
7057 const bool new_inline = !page || create;
7058
7059 again:
7060 read_lock(&em_tree->lock);
7061 em = lookup_extent_mapping(em_tree, start, len);
7062 if (em)
7063 em->bdev = fs_info->fs_devices->latest_bdev;
7064 read_unlock(&em_tree->lock);
7065
7066 if (em) {
7067 if (em->start > start || em->start + em->len <= start)
7068 free_extent_map(em);
7069 else if (em->block_start == EXTENT_MAP_INLINE && page)
7070 free_extent_map(em);
7071 else
7072 goto out;
7073 }
7074 em = alloc_extent_map();
7075 if (!em) {
7076 err = -ENOMEM;
7077 goto out;
7078 }
7079 em->bdev = fs_info->fs_devices->latest_bdev;
7080 em->start = EXTENT_MAP_HOLE;
7081 em->orig_start = EXTENT_MAP_HOLE;
7082 em->len = (u64)-1;
7083 em->block_len = (u64)-1;
7084
7085 if (!path) {
7086 path = btrfs_alloc_path();
7087 if (!path) {
7088 err = -ENOMEM;
7089 goto out;
7090 }
7091 /*
7092 * Chances are we'll be called again, so go ahead and do
7093 * readahead
7094 */
7095 path->reada = READA_FORWARD;
7096 }
7097
7098 ret = btrfs_lookup_file_extent(trans, root, path,
7099 objectid, start, trans != NULL);
7100 if (ret < 0) {
7101 err = ret;
7102 goto out;
7103 }
7104
7105 if (ret != 0) {
7106 if (path->slots[0] == 0)
7107 goto not_found;
7108 path->slots[0]--;
7109 }
7110
7111 leaf = path->nodes[0];
7112 item = btrfs_item_ptr(leaf, path->slots[0],
7113 struct btrfs_file_extent_item);
7114 /* are we inside the extent that was found? */
7115 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7116 found_type = found_key.type;
7117 if (found_key.objectid != objectid ||
7118 found_type != BTRFS_EXTENT_DATA_KEY) {
7119 /*
7120 * If we back up past the first extent we want to move forward
7121 * and see if there is an extent in front of us, otherwise we'll
7122 * say there is a hole for our whole search range which can
7123 * cause problems.
7124 */
7125 extent_end = start;
7126 goto next;
7127 }
7128
7129 found_type = btrfs_file_extent_type(leaf, item);
7130 extent_start = found_key.offset;
7131 if (found_type == BTRFS_FILE_EXTENT_REG ||
7132 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7133 extent_end = extent_start +
7134 btrfs_file_extent_num_bytes(leaf, item);
7135
7136 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
7137 extent_start);
7138 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
7139 size_t size;
7140 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
7141 extent_end = ALIGN(extent_start + size,
7142 fs_info->sectorsize);
7143
7144 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
7145 path->slots[0],
7146 extent_start);
7147 }
7148 next:
7149 if (start >= extent_end) {
7150 path->slots[0]++;
7151 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
7152 ret = btrfs_next_leaf(root, path);
7153 if (ret < 0) {
7154 err = ret;
7155 goto out;
7156 }
7157 if (ret > 0)
7158 goto not_found;
7159 leaf = path->nodes[0];
7160 }
7161 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7162 if (found_key.objectid != objectid ||
7163 found_key.type != BTRFS_EXTENT_DATA_KEY)
7164 goto not_found;
7165 if (start + len <= found_key.offset)
7166 goto not_found;
7167 if (start > found_key.offset)
7168 goto next;
7169 em->start = start;
7170 em->orig_start = start;
7171 em->len = found_key.offset - start;
7172 goto not_found_em;
7173 }
7174
7175 btrfs_extent_item_to_extent_map(inode, path, item,
7176 new_inline, em);
7177
7178 if (found_type == BTRFS_FILE_EXTENT_REG ||
7179 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7180 goto insert;
7181 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
7182 unsigned long ptr;
7183 char *map;
7184 size_t size;
7185 size_t extent_offset;
7186 size_t copy_size;
7187
7188 if (new_inline)
7189 goto out;
7190
7191 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
7192 extent_offset = page_offset(page) + pg_offset - extent_start;
7193 copy_size = min_t(u64, PAGE_SIZE - pg_offset,
7194 size - extent_offset);
7195 em->start = extent_start + extent_offset;
7196 em->len = ALIGN(copy_size, fs_info->sectorsize);
7197 em->orig_block_len = em->len;
7198 em->orig_start = em->start;
7199 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
7200 if (create == 0 && !PageUptodate(page)) {
7201 if (btrfs_file_extent_compression(leaf, item) !=
7202 BTRFS_COMPRESS_NONE) {
7203 ret = uncompress_inline(path, page, pg_offset,
7204 extent_offset, item);
7205 if (ret) {
7206 err = ret;
7207 goto out;
7208 }
7209 } else {
7210 map = kmap(page);
7211 read_extent_buffer(leaf, map + pg_offset, ptr,
7212 copy_size);
7213 if (pg_offset + copy_size < PAGE_SIZE) {
7214 memset(map + pg_offset + copy_size, 0,
7215 PAGE_SIZE - pg_offset -
7216 copy_size);
7217 }
7218 kunmap(page);
7219 }
7220 flush_dcache_page(page);
7221 } else if (create && PageUptodate(page)) {
7222 BUG();
7223 if (!trans) {
7224 kunmap(page);
7225 free_extent_map(em);
7226 em = NULL;
7227
7228 btrfs_release_path(path);
7229 trans = btrfs_join_transaction(root);
7230
7231 if (IS_ERR(trans))
7232 return ERR_CAST(trans);
7233 goto again;
7234 }
7235 map = kmap(page);
7236 write_extent_buffer(leaf, map + pg_offset, ptr,
7237 copy_size);
7238 kunmap(page);
7239 btrfs_mark_buffer_dirty(leaf);
7240 }
7241 set_extent_uptodate(io_tree, em->start,
7242 extent_map_end(em) - 1, NULL, GFP_NOFS);
7243 goto insert;
7244 }
7245 not_found:
7246 em->start = start;
7247 em->orig_start = start;
7248 em->len = len;
7249 not_found_em:
7250 em->block_start = EXTENT_MAP_HOLE;
7251 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
7252 insert:
7253 btrfs_release_path(path);
7254 if (em->start > start || extent_map_end(em) <= start) {
7255 btrfs_err(fs_info,
7256 "bad extent! em: [%llu %llu] passed [%llu %llu]",
7257 em->start, em->len, start, len);
7258 err = -EIO;
7259 goto out;
7260 }
7261
7262 err = 0;
7263 write_lock(&em_tree->lock);
7264 ret = add_extent_mapping(em_tree, em, 0);
7265 /* it is possible that someone inserted the extent into the tree
7266 * while we had the lock dropped. It is also possible that
7267 * an overlapping map exists in the tree
7268 */
7269 if (ret == -EEXIST) {
7270 struct extent_map *existing;
7271
7272 ret = 0;
7273
7274 existing = search_extent_mapping(em_tree, start, len);
7275 /*
7276 * existing will always be non-NULL, since there must be
7277 * an extent causing the -EEXIST.
7278 */
7279 if (start >= existing->start &&
7280 start < extent_map_end(existing)) {
7281 free_extent_map(em);
7282 em = existing;
7283 err = 0;
7284 } else {
7285 /*
7286 * The existing extent map is the one nearest to
7287 * the [start, start + len) range which overlaps it
7288 */
7289 err = merge_extent_mapping(em_tree, existing,
7290 em, start);
7291 free_extent_map(existing);
7292 if (err) {
7293 free_extent_map(em);
7294 em = NULL;
7295 }
7296 }
7297 }
7298 write_unlock(&em_tree->lock);
7299 out:
7300
7301 trace_btrfs_get_extent(root, inode, em);
7302
7303 btrfs_free_path(path);
7304 if (trans) {
7305 ret = btrfs_end_transaction(trans);
7306 if (!err)
7307 err = ret;
7308 }
7309 if (err) {
7310 free_extent_map(em);
7311 return ERR_PTR(err);
7312 }
7313 BUG_ON(!em); /* Error is always set */
7314 return em;
7315 }
7316
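/*
 * Like btrfs_get_extent(), but when the mapping comes back as a hole or a
 * prealloc extent, look for delalloc bytes in the io_tree behind it and
 * return a synthetic EXTENT_MAP_DELALLOC mapping (or a hole trimmed down to
 * where the delalloc starts) so fiemap can report pending writes.
 */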
7317 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
7318 struct page *page,
7319 size_t pg_offset, u64 start, u64 len,
7320 int create)
7321 {
7322 struct extent_map *em;
7323 struct extent_map *hole_em = NULL;
7324 u64 range_start = start;
7325 u64 end;
7326 u64 found;
7327 u64 found_end;
7328 int err = 0;
7329
7330 em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
7331 if (IS_ERR(em))
7332 return em;
7333 /*
7334 * If our em maps to:
7335 * - a hole or
7336 * - a pre-alloc extent,
7337 * there might actually be delalloc bytes behind it.
7338 */
7339 if (em->block_start != EXTENT_MAP_HOLE &&
7340 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7341 return em;
7342 else
7343 hole_em = em;
7344
7345 /* check to see if we've wrapped (len == -1 or similar) */
7346 end = start + len;
7347 if (end < start)
7348 end = (u64)-1;
7349 else
7350 end -= 1;
7351
7352 em = NULL;
7353
7354 /* ok, we didn't find anything, let's look for delalloc */
7355 found = count_range_bits(&inode->io_tree, &range_start,
7356 end, len, EXTENT_DELALLOC, 1);
7357 found_end = range_start + found;
7358 if (found_end < range_start)
7359 found_end = (u64)-1;
7360
7361 /*
7362 * we didn't find anything useful, return
7363 * the original results from get_extent()
7364 */
7365 if (range_start > end || found_end <= start) {
7366 em = hole_em;
7367 hole_em = NULL;
7368 goto out;
7369 }
7370
7371 /* adjust the range_start to make sure it doesn't
7372 * go backwards from the start they passed in
7373 */
7374 range_start = max(start, range_start);
7375 found = found_end - range_start;
7376
7377 if (found > 0) {
7378 u64 hole_start = start;
7379 u64 hole_len = len;
7380
7381 em = alloc_extent_map();
7382 if (!em) {
7383 err = -ENOMEM;
7384 goto out;
7385 }
7386 /*
7387 * when btrfs_get_extent can't find anything it
7388 * returns one huge hole
7389 *
7390 * make sure what it found really fits our range, and
7391 * adjust to make sure it is based on the start from
7392 * the caller
7393 */
7394 if (hole_em) {
7395 u64 calc_end = extent_map_end(hole_em);
7396
7397 if (calc_end <= start || (hole_em->start > end)) {
7398 free_extent_map(hole_em);
7399 hole_em = NULL;
7400 } else {
7401 hole_start = max(hole_em->start, start);
7402 hole_len = calc_end - hole_start;
7403 }
7404 }
7405 em->bdev = NULL;
7406 if (hole_em && range_start > hole_start) {
7407 /* our hole starts before our delalloc, so we
7408 * have to return just the parts of the hole
7409 * that go until the delalloc starts
7410 */
7411 em->len = min(hole_len,
7412 range_start - hole_start);
7413 em->start = hole_start;
7414 em->orig_start = hole_start;
7415 /*
7416 * don't adjust block start at all,
7417 * it is fixed at EXTENT_MAP_HOLE
7418 */
7419 em->block_start = hole_em->block_start;
7420 em->block_len = hole_len;
7421 if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7422 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7423 } else {
7424 em->start = range_start;
7425 em->len = found;
7426 em->orig_start = range_start;
7427 em->block_start = EXTENT_MAP_DELALLOC;
7428 em->block_len = found;
7429 }
7430 } else if (hole_em) {
7431 return hole_em;
7432 }
7433 out:
7434
7435 free_extent_map(hole_em);
7436 if (err) {
7437 free_extent_map(em);
7438 return ERR_PTR(err);
7439 }
7440 return em;
7441 }
7442
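/*
 * For direct I/O writes: create the in-memory extent map (skipped for NOCOW
 * writes, which reuse the existing extent) and queue the matching ordered
 * extent. If queueing the ordered extent fails, the extent map and the
 * cached range are torn down again.
 */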
7443 static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
7444 const u64 start,
7445 const u64 len,
7446 const u64 orig_start,
7447 const u64 block_start,
7448 const u64 block_len,
7449 const u64 orig_block_len,
7450 const u64 ram_bytes,
7451 const int type)
7452 {
7453 struct extent_map *em = NULL;
7454 int ret;
7455
7456 if (type != BTRFS_ORDERED_NOCOW) {
7457 em = create_io_em(inode, start, len, orig_start,
7458 block_start, block_len, orig_block_len,
7459 ram_bytes,
7460 BTRFS_COMPRESS_NONE, /* compress_type */
7461 type);
7462 if (IS_ERR(em))
7463 goto out;
7464 }
7465 ret = btrfs_add_ordered_extent_dio(inode, start, block_start,
7466 len, block_len, type);
7467 if (ret) {
7468 if (em) {
7469 free_extent_map(em);
7470 btrfs_drop_extent_cache(BTRFS_I(inode), start,
7471 start + len - 1, 0);
7472 }
7473 em = ERR_PTR(ret);
7474 }
7475 out:
7476
7477 return em;
7478 }
7479
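/*
 * Allocate a new data extent for a direct I/O write and wrap it in an
 * extent map plus ordered extent via btrfs_create_dio_extent(); the
 * reservation is released again if that fails.
 */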
7480 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7481 u64 start, u64 len)
7482 {
7483 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7484 struct btrfs_root *root = BTRFS_I(inode)->root;
7485 struct extent_map *em;
7486 struct btrfs_key ins;
7487 u64 alloc_hint;
7488 int ret;
7489
7490 alloc_hint = get_extent_allocation_hint(inode, start, len);
7491 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7492 0, alloc_hint, &ins, 1, 1);
7493 if (ret)
7494 return ERR_PTR(ret);
7495
7496 em = btrfs_create_dio_extent(inode, start, ins.offset, start,
7497 ins.objectid, ins.offset, ins.offset,
7498 ins.offset, BTRFS_ORDERED_REGULAR);
7499 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7500 if (IS_ERR(em))
7501 btrfs_free_reserved_extent(fs_info, ins.objectid,
7502 ins.offset, 1);
7503
7504 return em;
7505 }
7506
7507 /*
7508 * returns 1 when the nocow is safe, < 0 on error, 0 if the
7509 * block must be cow'd
7510 */
7511 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7512 u64 *orig_start, u64 *orig_block_len,
7513 u64 *ram_bytes)
7514 {
7515 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7516 struct btrfs_path *path;
7517 int ret;
7518 struct extent_buffer *leaf;
7519 struct btrfs_root *root = BTRFS_I(inode)->root;
7520 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7521 struct btrfs_file_extent_item *fi;
7522 struct btrfs_key key;
7523 u64 disk_bytenr;
7524 u64 backref_offset;
7525 u64 extent_end;
7526 u64 num_bytes;
7527 int slot;
7528 int found_type;
7529 bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7530
7531 path = btrfs_alloc_path();
7532 if (!path)
7533 return -ENOMEM;
7534
7535 ret = btrfs_lookup_file_extent(NULL, root, path,
7536 btrfs_ino(BTRFS_I(inode)), offset, 0);
7537 if (ret < 0)
7538 goto out;
7539
7540 slot = path->slots[0];
7541 if (ret == 1) {
7542 if (slot == 0) {
7543 /* can't find the item, must cow */
7544 ret = 0;
7545 goto out;
7546 }
7547 slot--;
7548 }
7549 ret = 0;
7550 leaf = path->nodes[0];
7551 btrfs_item_key_to_cpu(leaf, &key, slot);
7552 if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7553 key.type != BTRFS_EXTENT_DATA_KEY) {
7554 /* not our file or wrong item type, must cow */
7555 goto out;
7556 }
7557
7558 if (key.offset > offset) {
7559 /* Wrong offset, must cow */
7560 goto out;
7561 }
7562
7563 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7564 found_type = btrfs_file_extent_type(leaf, fi);
7565 if (found_type != BTRFS_FILE_EXTENT_REG &&
7566 found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7567 /* not a regular extent, must cow */
7568 goto out;
7569 }
7570
7571 if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7572 goto out;
7573
7574 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7575 if (extent_end <= offset)
7576 goto out;
7577
7578 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7579 if (disk_bytenr == 0)
7580 goto out;
7581
7582 if (btrfs_file_extent_compression(leaf, fi) ||
7583 btrfs_file_extent_encryption(leaf, fi) ||
7584 btrfs_file_extent_other_encoding(leaf, fi))
7585 goto out;
7586
7587 backref_offset = btrfs_file_extent_offset(leaf, fi);
7588
7589 if (orig_start) {
7590 *orig_start = key.offset - backref_offset;
7591 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7592 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7593 }
7594
7595 if (btrfs_extent_readonly(fs_info, disk_bytenr))
7596 goto out;
7597
7598 num_bytes = min(offset + *len, extent_end) - offset;
7599 if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7600 u64 range_end;
7601
7602 range_end = round_up(offset + num_bytes,
7603 root->fs_info->sectorsize) - 1;
7604 ret = test_range_bit(io_tree, offset, range_end,
7605 EXTENT_DELALLOC, 0, NULL);
7606 if (ret) {
7607 ret = -EAGAIN;
7608 goto out;
7609 }
7610 }
7611
7612 btrfs_release_path(path);
7613
7614 /*
7615 * look for other files referencing this extent, if we
7616 * find any we must cow
7617 */
7618
7619 ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
7620 key.offset - backref_offset, disk_bytenr);
7621 if (ret) {
7622 ret = 0;
7623 goto out;
7624 }
7625
7626 /*
7627 * adjust disk_bytenr and num_bytes to cover just the bytes
7628 * in this extent we are about to write. If there
7629 * are any csums in that range we have to cow in order
7630 * to keep the csums correct
7631 */
7632 disk_bytenr += backref_offset;
7633 disk_bytenr += offset - key.offset;
7634 if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
7635 goto out;
7636 /*
7637 * all of the above have passed, it is safe to overwrite this extent
7638 * without cow
7639 */
7640 *len = num_bytes;
7641 ret = 1;
7642 out:
7643 btrfs_free_path(path);
7644 return ret;
7645 }
7646
7647 bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7648 {
7649 struct radix_tree_root *root = &inode->i_mapping->page_tree;
7650 bool found = false;
7651 void **pagep = NULL;
7652 struct page *page = NULL;
7653 unsigned long start_idx;
7654 unsigned long end_idx;
7655
7656 start_idx = start >> PAGE_SHIFT;
7657
7658 /*
7659 * end is the last byte in the last page. end == start is legal
7660 */
7661 end_idx = end >> PAGE_SHIFT;
7662
7663 rcu_read_lock();
7664
7665 /* Most of the code in this while loop is lifted from
7666 * find_get_page. It's been modified to begin searching from a
7667 * page and return just the first page found in that range. If the
7668 * found idx is less than or equal to the end idx then we know that
7669 * a page exists. If no pages are found or if those pages are
7670 * outside of the range then we're fine (yay!) */
7671 while (page == NULL &&
7672 radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
7673 page = radix_tree_deref_slot(pagep);
7674 if (unlikely(!page))
7675 break;
7676
7677 if (radix_tree_exception(page)) {
7678 if (radix_tree_deref_retry(page)) {
7679 page = NULL;
7680 continue;
7681 }
7682 /*
7683 * Otherwise, shmem/tmpfs must be storing a swap entry
7684 * here as an exceptional entry: so return it without
7685 * attempting to raise page count.
7686 */
7687 page = NULL;
7688 break; /* TODO: Is this relevant for this use case? */
7689 }
7690
7691 if (!page_cache_get_speculative(page)) {
7692 page = NULL;
7693 continue;
7694 }
7695
7696 /*
7697 * Has the page moved?
7698 * This is part of the lockless pagecache protocol. See
7699 * include/linux/pagemap.h for details.
7700 */
7701 if (unlikely(page != *pagep)) {
7702 put_page(page);
7703 page = NULL;
7704 }
7705 }
7706
7707 if (page) {
7708 if (page->index <= end_idx)
7709 found = true;
7710 put_page(page);
7711 }
7712
7713 rcu_read_unlock();
7714 return found;
7715 }
7716
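/*
 * Lock the io tree range we are about to do direct IO against.  Loops until
 * the range is locked with no ordered extents and (for writes) no buffered
 * pages inside it.  Ordered extents are either waited on or, when a DIO read
 * races with a buffered write, we bail out with -ENOTBLK so the caller can
 * fall back to buffered IO instead of deadlocking on page locks.
 */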
7717 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7718 struct extent_state **cached_state, int writing)
7719 {
7720 struct btrfs_ordered_extent *ordered;
7721 int ret = 0;
7722
7723 while (1) {
7724 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7725 cached_state);
7726 /*
7727 * We're concerned with the entire range that we're going to be
7728 * doing DIO to, so we need to make sure there's no ordered
7729 * extents in this range.
7730 */
7731 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7732 lockend - lockstart + 1);
7733
7734 /*
7735 * We need to make sure there are no buffered pages in this
7736 * range either, we could have raced between the invalidate in
7737 * generic_file_direct_write and locking the extent. The
7738 * invalidate needs to happen so that reads after a write do not
7739 * get stale data.
7740 */
7741 if (!ordered &&
7742 (!writing ||
7743 !btrfs_page_exists_in_range(inode, lockstart, lockend)))
7744 break;
7745
7746 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7747 cached_state, GFP_NOFS);
7748
7749 if (ordered) {
7750 /*
7751 * If we are doing a DIO read and the ordered extent we
7752 * found is for a buffered write, we can not wait for it
7753 * to complete and retry, because if we do so we can
7754 * deadlock with concurrent buffered writes on page
7755 * locks. This happens only if our DIO read covers more
7756 * than one extent map, if at this point it has already
7757 * created an ordered extent for a previous extent map
7758 * and locked its range in the inode's io tree, and a
7759 * concurrent write against that previous extent map's
7760 * range and this range started (we unlock the ranges
7761 * in the io tree only when the bios complete and
7762 * buffered writes always lock pages before attempting
7763 * to lock range in the io tree).
7764 */
7765 if (writing ||
7766 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7767 btrfs_start_ordered_extent(inode, ordered, 1);
7768 else
7769 ret = -ENOTBLK;
7770 btrfs_put_ordered_extent(ordered);
7771 } else {
7772 /*
7773 * We could trigger writeback for this range (and wait
7774 * for it to complete) and then invalidate the pages for
7775 * this range (through invalidate_inode_pages2_range()),
7776 * but that can lead us to a deadlock with a concurrent
7777 * call to readpages() (a buffered read or a defrag call
7778 * triggered a readahead) on a page lock due to an
7779 * ordered dio extent we created before but for which we have
7780 * not yet submitted a corresponding bio (hence it cannot
7781 * complete), which makes readpages() wait for that
7782 * ordered extent to complete while holding a lock on
7783 * that page.
7784 */
7785 ret = -ENOTBLK;
7786 }
7787
7788 if (ret)
7789 break;
7790
7791 cond_resched();
7792 }
7793
7794 return ret;
7795 }
7796
7797 /* The callers of this must take lock_extent() */
7798 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
7799 u64 orig_start, u64 block_start,
7800 u64 block_len, u64 orig_block_len,
7801 u64 ram_bytes, int compress_type,
7802 int type)
7803 {
7804 struct extent_map_tree *em_tree;
7805 struct extent_map *em;
7806 struct btrfs_root *root = BTRFS_I(inode)->root;
7807 int ret;
7808
7809 ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7810 type == BTRFS_ORDERED_COMPRESSED ||
7811 type == BTRFS_ORDERED_NOCOW ||
7812 type == BTRFS_ORDERED_REGULAR);
7813
7814 em_tree = &BTRFS_I(inode)->extent_tree;
7815 em = alloc_extent_map();
7816 if (!em)
7817 return ERR_PTR(-ENOMEM);
7818
7819 em->start = start;
7820 em->orig_start = orig_start;
7821 em->len = len;
7822 em->block_len = block_len;
7823 em->block_start = block_start;
7824 em->bdev = root->fs_info->fs_devices->latest_bdev;
7825 em->orig_block_len = orig_block_len;
7826 em->ram_bytes = ram_bytes;
7827 em->generation = -1;
7828 set_bit(EXTENT_FLAG_PINNED, &em->flags);
7829 if (type == BTRFS_ORDERED_PREALLOC) {
7830 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7831 } else if (type == BTRFS_ORDERED_COMPRESSED) {
7832 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
7833 em->compress_type = compress_type;
7834 }
7835
7836 do {
7837 btrfs_drop_extent_cache(BTRFS_I(inode), em->start,
7838 em->start + em->len - 1, 0);
7839 write_lock(&em_tree->lock);
7840 ret = add_extent_mapping(em_tree, em, 1);
7841 write_unlock(&em_tree->lock);
7842 /*
7843 * The caller has taken lock_extent(), so nobody else should be
7844 * able to race with us and add an overlapping em.
7845 */
7846 } while (ret == -EEXIST);
7847
7848 if (ret) {
7849 free_extent_map(em);
7850 return ERR_PTR(ret);
7851 }
7852
7853 /* em got 2 refs now, the caller needs to do free_extent_map once. */
7854 return em;
7855 }
7856
7857 static void adjust_dio_outstanding_extents(struct inode *inode,
7858 struct btrfs_dio_data *dio_data,
7859 const u64 len)
7860 {
7861 unsigned num_extents = count_max_extents(len);
7862
7863 /*
7864 * If we have an outstanding_extents count still set then we're
7865 * within our reservation, otherwise we need to adjust our inode
7866 * counter appropriately.
7867 */
7868 if (dio_data->outstanding_extents >= num_extents) {
7869 dio_data->outstanding_extents -= num_extents;
7870 } else {
7871 /*
7872 * If dio write length has been split due to no large enough
7873 * contiguous space, we need to compensate our inode counter
7874 * appropriately.
7875 */
7876 u64 num_needed = num_extents - dio_data->outstanding_extents;
7877
7878 spin_lock(&BTRFS_I(inode)->lock);
7879 BTRFS_I(inode)->outstanding_extents += num_needed;
7880 spin_unlock(&BTRFS_I(inode)->lock);
7881 }
7882 }
7883
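/*
 * get_block callback used by __blockdev_direct_IO().  Maps the file range
 * starting at iblock to an on-disk extent and fills in bh_result.  For reads
 * we only map existing extents (holes and prealloc ranges are returned
 * unmapped); for writes we either reuse a NOCOW/prealloc extent when
 * can_nocow_extent() says it is safe, or allocate a new extent via
 * btrfs_new_extent_direct().
 */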
7884 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7885 struct buffer_head *bh_result, int create)
7886 {
7887 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7888 struct extent_map *em;
7889 struct extent_state *cached_state = NULL;
7890 struct btrfs_dio_data *dio_data = NULL;
7891 u64 start = iblock << inode->i_blkbits;
7892 u64 lockstart, lockend;
7893 u64 len = bh_result->b_size;
7894 int unlock_bits = EXTENT_LOCKED;
7895 int ret = 0;
7896
7897 if (create)
7898 unlock_bits |= EXTENT_DIRTY;
7899 else
7900 len = min_t(u64, len, fs_info->sectorsize);
7901
7902 lockstart = start;
7903 lockend = start + len - 1;
7904
7905 if (current->journal_info) {
7906 /*
7907 * Need to pull our outstanding extents and set journal_info to NULL so
7908 * that anything that needs to check if there's a transaction doesn't get
7909 * confused.
7910 */
7911 dio_data = current->journal_info;
7912 current->journal_info = NULL;
7913 }
7914
7915 /*
7916 * If this errors out it's because we couldn't invalidate pagecache for
7917 * this range and we need to fall back to buffered.
7918 */
7919 if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
7920 create)) {
7921 ret = -ENOTBLK;
7922 goto err;
7923 }
7924
7925 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
7926 if (IS_ERR(em)) {
7927 ret = PTR_ERR(em);
7928 goto unlock_err;
7929 }
7930
7931 /*
7932 * Ok for INLINE and COMPRESSED extents we need to fall back to buffered
7933 * io. INLINE is special, and we could probably kludge it in here, but
7934 * it's still buffered so for safety let's just fall back to the generic
7935 * buffered path.
7936 *
7937 * For COMPRESSED we _have_ to read the entire extent in so we can
7938 * decompress it, so there will be buffering required no matter what we
7939 * do, so go ahead and fall back to buffered.
7940 *
7941 * We return -ENOTBLK because that's what makes DIO go ahead and go back
7942 * to buffered IO. Don't blame me, this is the price we pay for using
7943 * the generic code.
7944 */
7945 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7946 em->block_start == EXTENT_MAP_INLINE) {
7947 free_extent_map(em);
7948 ret = -ENOTBLK;
7949 goto unlock_err;
7950 }
7951
7952 /* Just a good old fashioned hole, return */
7953 if (!create && (em->block_start == EXTENT_MAP_HOLE ||
7954 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
7955 free_extent_map(em);
7956 goto unlock_err;
7957 }
7958
7959 /*
7960 * We don't allocate a new extent in the following cases
7961 *
7962 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7963 * existing extent.
7964 * 2) The extent is marked as PREALLOC. We're good to go here and can
7965 * just use the extent.
7966 *
7967 */
7968 if (!create) {
7969 len = min(len, em->len - (start - em->start));
7970 lockstart = start + len;
7971 goto unlock;
7972 }
7973
7974 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7975 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7976 em->block_start != EXTENT_MAP_HOLE)) {
7977 int type;
7978 u64 block_start, orig_start, orig_block_len, ram_bytes;
7979
7980 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7981 type = BTRFS_ORDERED_PREALLOC;
7982 else
7983 type = BTRFS_ORDERED_NOCOW;
7984 len = min(len, em->len - (start - em->start));
7985 block_start = em->block_start + (start - em->start);
7986
7987 if (can_nocow_extent(inode, start, &len, &orig_start,
7988 &orig_block_len, &ram_bytes) == 1 &&
7989 btrfs_inc_nocow_writers(fs_info, block_start)) {
7990 struct extent_map *em2;
7991
7992 em2 = btrfs_create_dio_extent(inode, start, len,
7993 orig_start, block_start,
7994 len, orig_block_len,
7995 ram_bytes, type);
7996 btrfs_dec_nocow_writers(fs_info, block_start);
7997 if (type == BTRFS_ORDERED_PREALLOC) {
7998 free_extent_map(em);
7999 em = em2;
8000 }
8001 if (em2 && IS_ERR(em2)) {
8002 ret = PTR_ERR(em2);
8003 goto unlock_err;
8004 }
8005 /*
8006 * For inode marked NODATACOW or extent marked PREALLOC,
8007 * use the existing or preallocated extent, so we do not
8008 * need to adjust btrfs_space_info's bytes_may_use.
8009 */
8010 btrfs_free_reserved_data_space_noquota(inode,
8011 start, len);
8012 goto unlock;
8013 }
8014 }
8015
8016 /*
8017 * this will cow the extent, reset the len in case we changed
8018 * it above
8019 */
8020 len = bh_result->b_size;
8021 free_extent_map(em);
8022 em = btrfs_new_extent_direct(inode, start, len);
8023 if (IS_ERR(em)) {
8024 ret = PTR_ERR(em);
8025 goto unlock_err;
8026 }
8027 len = min(len, em->len - (start - em->start));
8028 unlock:
8029 bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
8030 inode->i_blkbits;
8031 bh_result->b_size = len;
8032 bh_result->b_bdev = em->bdev;
8033 set_buffer_mapped(bh_result);
8034 if (create) {
8035 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
8036 set_buffer_new(bh_result);
8037
8038 /*
8039 * Need to update the i_size under the extent lock so buffered
8040 * readers will get the updated i_size when we unlock.
8041 */
8042 if (!dio_data->overwrite && start + len > i_size_read(inode))
8043 i_size_write(inode, start + len);
8044
8045 adjust_dio_outstanding_extents(inode, dio_data, len);
8046 WARN_ON(dio_data->reserve < len);
8047 dio_data->reserve -= len;
8048 dio_data->unsubmitted_oe_range_end = start + len;
8049 current->journal_info = dio_data;
8050 }
8051
8052 /*
8053 * In the case of write we need to clear and unlock the entire range,
8054 * in the case of read we need to unlock only the end area that we
8055 * aren't using if there is any left over space.
8056 */
8057 if (lockstart < lockend) {
8058 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
8059 lockend, unlock_bits, 1, 0,
8060 &cached_state, GFP_NOFS);
8061 } else {
8062 free_extent_state(cached_state);
8063 }
8064
8065 free_extent_map(em);
8066
8067 return 0;
8068
8069 unlock_err:
8070 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
8071 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
8072 err:
8073 if (dio_data)
8074 current->journal_info = dio_data;
8075 /*
8076 * Compensate the delalloc release we do in btrfs_direct_IO() when we
8077 * write less data than expected, so that we don't underflow our inode's
8078 * outstanding extents counter.
8079 */
8080 if (create && dio_data)
8081 adjust_dio_outstanding_extents(inode, dio_data, len);
8082
8083 return ret;
8084 }
8085
8086 static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
8087 struct bio *bio,
8088 int mirror_num)
8089 {
8090 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8091 blk_status_t ret;
8092
8093 BUG_ON(bio_op(bio) == REQ_OP_WRITE);
8094
8095 bio_get(bio);
8096
8097 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
8098 if (ret)
8099 goto err;
8100
8101 ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
8102 err:
8103 bio_put(bio);
8104 return ret;
8105 }
8106
8107 static int btrfs_check_dio_repairable(struct inode *inode,
8108 struct bio *failed_bio,
8109 struct io_failure_record *failrec,
8110 int failed_mirror)
8111 {
8112 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8113 int num_copies;
8114
8115 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
8116 if (num_copies == 1) {
8117 /*
8118 * we only have a single copy of the data, so don't bother with
8119 * all the retry and error correction code that follows. no
8120 * matter what the error is, it is very likely to persist.
8121 */
8122 btrfs_debug(fs_info,
8123 "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
8124 num_copies, failrec->this_mirror, failed_mirror);
8125 return 0;
8126 }
8127
8128 failrec->failed_mirror = failed_mirror;
8129 failrec->this_mirror++;
8130 if (failrec->this_mirror == failed_mirror)
8131 failrec->this_mirror++;
8132
8133 if (failrec->this_mirror > num_copies) {
8134 btrfs_debug(fs_info,
8135 "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
8136 num_copies, failrec->this_mirror, failed_mirror);
8137 return 0;
8138 }
8139
8140 return 1;
8141 }
8142
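/*
 * Resubmit one sector of a direct IO read that failed its csum check or hit
 * an IO error.  We record the failure, pick the next mirror to try via
 * btrfs_check_dio_repairable() and submit a repair bio for just this
 * page/offset, which completes through the given repair_endio callback.
 */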
8143 static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
8144 struct page *page, unsigned int pgoff,
8145 u64 start, u64 end, int failed_mirror,
8146 bio_end_io_t *repair_endio, void *repair_arg)
8147 {
8148 struct io_failure_record *failrec;
8149 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8150 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
8151 struct bio *bio;
8152 int isector;
8153 unsigned int read_mode = 0;
8154 int segs;
8155 int ret;
8156 blk_status_t status;
8157
8158 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
8159
8160 ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
8161 if (ret)
8162 return errno_to_blk_status(ret);
8163
8164 ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
8165 failed_mirror);
8166 if (!ret) {
8167 free_io_failure(failure_tree, io_tree, failrec);
8168 return BLK_STS_IOERR;
8169 }
8170
8171 segs = bio_segments(failed_bio);
8172 if (segs > 1 ||
8173 (failed_bio->bi_io_vec->bv_len > btrfs_inode_sectorsize(inode)))
8174 read_mode |= REQ_FAILFAST_DEV;
8175
8176 isector = start - btrfs_io_bio(failed_bio)->logical;
8177 isector >>= inode->i_sb->s_blocksize_bits;
8178 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
8179 pgoff, isector, repair_endio, repair_arg);
8180 bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
8181
8182 btrfs_debug(BTRFS_I(inode)->root->fs_info,
8183 "repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
8184 read_mode, failrec->this_mirror, failrec->in_validation);
8185
8186 status = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
8187 if (status) {
8188 free_io_failure(failure_tree, io_tree, failrec);
8189 bio_put(bio);
8190 }
8191
8192 return status;
8193 }
8194
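/*
 * Completion cookie for the synchronous per-sector retries below: the
 * submitter initializes 'done', hands btrfs_retry_endio[_nocsum]() to
 * dio_read_error() as the repair end_io, waits on the completion and then
 * checks 'uptodate' to decide whether to move on or try another mirror.
 */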
8195 struct btrfs_retry_complete {
8196 struct completion done;
8197 struct inode *inode;
8198 u64 start;
8199 int uptodate;
8200 };
8201
8202 static void btrfs_retry_endio_nocsum(struct bio *bio)
8203 {
8204 struct btrfs_retry_complete *done = bio->bi_private;
8205 struct inode *inode = done->inode;
8206 struct bio_vec *bvec;
8207 struct extent_io_tree *io_tree, *failure_tree;
8208 int i;
8209
8210 if (bio->bi_status)
8211 goto end;
8212
8213 ASSERT(bio->bi_vcnt == 1);
8214 io_tree = &BTRFS_I(inode)->io_tree;
8215 failure_tree = &BTRFS_I(inode)->io_failure_tree;
8216 ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
8217
8218 done->uptodate = 1;
8219 ASSERT(!bio_flagged(bio, BIO_CLONED));
8220 bio_for_each_segment_all(bvec, bio, i)
8221 clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree,
8222 io_tree, done->start, bvec->bv_page,
8223 btrfs_ino(BTRFS_I(inode)), 0);
8224 end:
8225 complete(&done->done);
8226 bio_put(bio);
8227 }
8228
8229 static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode,
8230 struct btrfs_io_bio *io_bio)
8231 {
8232 struct btrfs_fs_info *fs_info;
8233 struct bio_vec bvec;
8234 struct bvec_iter iter;
8235 struct btrfs_retry_complete done;
8236 u64 start;
8237 unsigned int pgoff;
8238 u32 sectorsize;
8239 int nr_sectors;
8240 blk_status_t ret;
8241 blk_status_t err = BLK_STS_OK;
8242
8243 fs_info = BTRFS_I(inode)->root->fs_info;
8244 sectorsize = fs_info->sectorsize;
8245
8246 start = io_bio->logical;
8247 done.inode = inode;
8248 io_bio->bio.bi_iter = io_bio->iter;
8249
8250 bio_for_each_segment(bvec, &io_bio->bio, iter) {
8251 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
8252 pgoff = bvec.bv_offset;
8253
8254 next_block_or_try_again:
8255 done.uptodate = 0;
8256 done.start = start;
8257 init_completion(&done.done);
8258
8259 ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
8260 pgoff, start, start + sectorsize - 1,
8261 io_bio->mirror_num,
8262 btrfs_retry_endio_nocsum, &done);
8263 if (ret) {
8264 err = ret;
8265 goto next;
8266 }
8267
8268 wait_for_completion_io(&done.done);
8269
8270 if (!done.uptodate) {
8271 /* We might have another mirror, so try again */
8272 goto next_block_or_try_again;
8273 }
8274
8275 next:
8276 start += sectorsize;
8277
8278 nr_sectors--;
8279 if (nr_sectors) {
8280 pgoff += sectorsize;
8281 ASSERT(pgoff < PAGE_SIZE);
8282 goto next_block_or_try_again;
8283 }
8284 }
8285
8286 return err;
8287 }
8288
8289 static void btrfs_retry_endio(struct bio *bio)
8290 {
8291 struct btrfs_retry_complete *done = bio->bi_private;
8292 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8293 struct extent_io_tree *io_tree, *failure_tree;
8294 struct inode *inode = done->inode;
8295 struct bio_vec *bvec;
8296 int uptodate;
8297 int ret;
8298 int i;
8299
8300 if (bio->bi_status)
8301 goto end;
8302
8303 uptodate = 1;
8304
8305 ASSERT(bio->bi_vcnt == 1);
8306 ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
8307
8308 io_tree = &BTRFS_I(inode)->io_tree;
8309 failure_tree = &BTRFS_I(inode)->io_failure_tree;
8310
8311 ASSERT(!bio_flagged(bio, BIO_CLONED));
8312 bio_for_each_segment_all(bvec, bio, i) {
8313 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
8314 bvec->bv_offset, done->start,
8315 bvec->bv_len);
8316 if (!ret)
8317 clean_io_failure(BTRFS_I(inode)->root->fs_info,
8318 failure_tree, io_tree, done->start,
8319 bvec->bv_page,
8320 btrfs_ino(BTRFS_I(inode)),
8321 bvec->bv_offset);
8322 else
8323 uptodate = 0;
8324 }
8325
8326 done->uptodate = uptodate;
8327 end:
8328 complete(&done->done);
8329 bio_put(bio);
8330 }
8331
8332 static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
8333 struct btrfs_io_bio *io_bio, blk_status_t err)
8334 {
8335 struct btrfs_fs_info *fs_info;
8336 struct bio_vec bvec;
8337 struct bvec_iter iter;
8338 struct btrfs_retry_complete done;
8339 u64 start;
8340 u64 offset = 0;
8341 u32 sectorsize;
8342 int nr_sectors;
8343 unsigned int pgoff;
8344 int csum_pos;
8345 bool uptodate = (err == 0);
8346 int ret;
8347 blk_status_t status;
8348
8349 fs_info = BTRFS_I(inode)->root->fs_info;
8350 sectorsize = fs_info->sectorsize;
8351
8352 err = BLK_STS_OK;
8353 start = io_bio->logical;
8354 done.inode = inode;
8355 io_bio->bio.bi_iter = io_bio->iter;
8356
8357 bio_for_each_segment(bvec, &io_bio->bio, iter) {
8358 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
8359
8360 pgoff = bvec.bv_offset;
8361 next_block:
8362 if (uptodate) {
8363 csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
8364 ret = __readpage_endio_check(inode, io_bio, csum_pos,
8365 bvec.bv_page, pgoff, start, sectorsize);
8366 if (likely(!ret))
8367 goto next;
8368 }
8369 try_again:
8370 done.uptodate = 0;
8371 done.start = start;
8372 init_completion(&done.done);
8373
8374 status = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
8375 pgoff, start, start + sectorsize - 1,
8376 io_bio->mirror_num, btrfs_retry_endio,
8377 &done);
8378 if (status) {
8379 err = status;
8380 goto next;
8381 }
8382
8383 wait_for_completion_io(&done.done);
8384
8385 if (!done.uptodate) {
8386 /* We might have another mirror, so try again */
8387 goto try_again;
8388 }
8389 next:
8390 offset += sectorsize;
8391 start += sectorsize;
8392
8393 ASSERT(nr_sectors);
8394
8395 nr_sectors--;
8396 if (nr_sectors) {
8397 pgoff += sectorsize;
8398 ASSERT(pgoff < PAGE_SIZE);
8399 goto next_block;
8400 }
8401 }
8402
8403 return err;
8404 }
8405
8406 static blk_status_t btrfs_subio_endio_read(struct inode *inode,
8407 struct btrfs_io_bio *io_bio, blk_status_t err)
8408 {
8409 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8410
8411 if (skip_csum) {
8412 if (unlikely(err))
8413 return __btrfs_correct_data_nocsum(inode, io_bio);
8414 else
8415 return BLK_STS_OK;
8416 } else {
8417 return __btrfs_subio_endio_read(inode, io_bio, err);
8418 }
8419 }
8420
8421 static void btrfs_endio_direct_read(struct bio *bio)
8422 {
8423 struct btrfs_dio_private *dip = bio->bi_private;
8424 struct inode *inode = dip->inode;
8425 struct bio *dio_bio;
8426 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8427 blk_status_t err = bio->bi_status;
8428
8429 if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
8430 err = btrfs_subio_endio_read(inode, io_bio, err);
8431
8432 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
8433 dip->logical_offset + dip->bytes - 1);
8434 dio_bio = dip->dio_bio;
8435
8436 kfree(dip);
8437
8438 dio_bio->bi_status = err;
8439 dio_end_io(dio_bio);
8440
8441 if (io_bio->end_io)
8442 io_bio->end_io(io_bio, blk_status_to_errno(err));
8443 bio_put(bio);
8444 }
8445
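/*
 * Finish the ordered extent(s) covering a completed (or failed) direct IO
 * write.  A single dio bio may span several ordered extents, so walk the
 * whole range and queue finish_ordered_fn work for every ordered extent
 * whose pending IO count drops to zero.
 */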
8446 static void __endio_write_update_ordered(struct inode *inode,
8447 const u64 offset, const u64 bytes,
8448 const bool uptodate)
8449 {
8450 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8451 struct btrfs_ordered_extent *ordered = NULL;
8452 struct btrfs_workqueue *wq;
8453 btrfs_work_func_t func;
8454 u64 ordered_offset = offset;
8455 u64 ordered_bytes = bytes;
8456 u64 last_offset;
8457 int ret;
8458
8459 if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
8460 wq = fs_info->endio_freespace_worker;
8461 func = btrfs_freespace_write_helper;
8462 } else {
8463 wq = fs_info->endio_write_workers;
8464 func = btrfs_endio_write_helper;
8465 }
8466
8467 again:
8468 last_offset = ordered_offset;
8469 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
8470 &ordered_offset,
8471 ordered_bytes,
8472 uptodate);
8473 if (!ret)
8474 goto out_test;
8475
8476 btrfs_init_work(&ordered->work, func, finish_ordered_fn, NULL, NULL);
8477 btrfs_queue_work(wq, &ordered->work);
8478 out_test:
8479 /*
8480 * If btrfs_dec_test_first_ordered_pending() does not find any ordered extent
8481 * in the range, we can exit.
8482 */
8483 if (ordered_offset == last_offset)
8484 return;
8485 /*
8486 * our bio might span multiple ordered extents. If we haven't
8487 * completed the accounting for the whole dio, go back and try again
8488 */
8489 if (ordered_offset < offset + bytes) {
8490 ordered_bytes = offset + bytes - ordered_offset;
8491 ordered = NULL;
8492 goto again;
8493 }
8494 }
8495
8496 static void btrfs_endio_direct_write(struct bio *bio)
8497 {
8498 struct btrfs_dio_private *dip = bio->bi_private;
8499 struct bio *dio_bio = dip->dio_bio;
8500
8501 __endio_write_update_ordered(dip->inode, dip->logical_offset,
8502 dip->bytes, !bio->bi_status);
8503
8504 kfree(dip);
8505
8506 dio_bio->bi_status = bio->bi_status;
8507 dio_end_io(dio_bio);
8508 bio_put(bio);
8509 }
8510
8511 static blk_status_t __btrfs_submit_bio_start_direct_io(void *private_data,
8512 struct bio *bio, int mirror_num,
8513 unsigned long bio_flags, u64 offset)
8514 {
8515 struct inode *inode = private_data;
8516 blk_status_t ret;
8517 ret = btrfs_csum_one_bio(inode, bio, offset, 1);
8518 BUG_ON(ret); /* -ENOMEM */
8519 return 0;
8520 }
8521
8522 static void btrfs_end_dio_bio(struct bio *bio)
8523 {
8524 struct btrfs_dio_private *dip = bio->bi_private;
8525 blk_status_t err = bio->bi_status;
8526
8527 if (err)
8528 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8529 "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
8530 btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
8531 bio->bi_opf,
8532 (unsigned long long)bio->bi_iter.bi_sector,
8533 bio->bi_iter.bi_size, err);
8534
8535 if (dip->subio_endio)
8536 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
8537
8538 if (err) {
8539 dip->errors = 1;
8540
8541 /*
8542 * before the atomic variable goes to zero, we must make sure
8543 * dip->errors is perceived to be set.
8544 */
8545 smp_mb__before_atomic();
8546 }
8547
8548 /* if there are more bios still pending for this dio, just exit */
8549 if (!atomic_dec_and_test(&dip->pending_bios))
8550 goto out;
8551
8552 if (dip->errors) {
8553 bio_io_error(dip->orig_bio);
8554 } else {
8555 dip->dio_bio->bi_status = 0;
8556 bio_endio(dip->orig_bio);
8557 }
8558 out:
8559 bio_put(bio);
8560 }
8561
8562 static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
8563 struct btrfs_dio_private *dip,
8564 struct bio *bio,
8565 u64 file_offset)
8566 {
8567 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8568 struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
8569 blk_status_t ret;
8570
8571 /*
8572 * We load all the csum data we need when we submit
8573 * the first bio to reduce the csum tree search and
8574 * contention.
8575 */
8576 if (dip->logical_offset == file_offset) {
8577 ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio,
8578 file_offset);
8579 if (ret)
8580 return ret;
8581 }
8582
8583 if (bio == dip->orig_bio)
8584 return 0;
8585
8586 file_offset -= dip->logical_offset;
8587 file_offset >>= inode->i_sb->s_blocksize_bits;
8588 io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
8589
8590 return 0;
8591 }
8592
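/*
 * Submit one (possibly cloned) dio bio.  Reads get a workqueue based end_io
 * so completion (csum checks and repair) runs in process context; writes
 * either offload csum generation to the async helpers or compute the csums
 * inline; inodes flagged NODATASUM skip the csum work entirely.  In all
 * cases the bio is finally mapped and sent down with btrfs_map_bio().
 */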
8593 static inline blk_status_t
8594 __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, u64 file_offset,
8595 int async_submit)
8596 {
8597 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8598 struct btrfs_dio_private *dip = bio->bi_private;
8599 bool write = bio_op(bio) == REQ_OP_WRITE;
8600 blk_status_t ret;
8601
8602 if (async_submit)
8603 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8604
8605 bio_get(bio);
8606
8607 if (!write) {
8608 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
8609 if (ret)
8610 goto err;
8611 }
8612
8613 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
8614 goto map;
8615
8616 if (write && async_submit) {
8617 ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0,
8618 file_offset, inode,
8619 __btrfs_submit_bio_start_direct_io,
8620 __btrfs_submit_bio_done);
8621 goto err;
8622 } else if (write) {
8623 /*
8624 * If we aren't doing async submit, calculate the csum of the
8625 * bio now.
8626 */
8627 ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
8628 if (ret)
8629 goto err;
8630 } else {
8631 ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
8632 file_offset);
8633 if (ret)
8634 goto err;
8635 }
8636 map:
8637 ret = btrfs_map_bio(fs_info, bio, 0, async_submit);
8638 err:
8639 bio_put(bio);
8640 return ret;
8641 }
8642
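/*
 * Split the original dio bio into pieces that each fit inside a single
 * chunk mapping (as reported by btrfs_map_block()) and submit them.  The
 * dip->pending_bios count tracks the in-flight pieces so the final end_io
 * on the original bio only runs once all of them have completed.
 */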
8643 static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
8644 {
8645 struct inode *inode = dip->inode;
8646 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8647 struct bio *bio;
8648 struct bio *orig_bio = dip->orig_bio;
8649 u64 start_sector = orig_bio->bi_iter.bi_sector;
8650 u64 file_offset = dip->logical_offset;
8651 u64 map_length;
8652 int async_submit = 0;
8653 u64 submit_len;
8654 int clone_offset = 0;
8655 int clone_len;
8656 int ret;
8657 blk_status_t status;
8658
8659 map_length = orig_bio->bi_iter.bi_size;
8660 submit_len = map_length;
8661 ret = btrfs_map_block(fs_info, btrfs_op(orig_bio), start_sector << 9,
8662 &map_length, NULL, 0);
8663 if (ret)
8664 return -EIO;
8665
8666 if (map_length >= submit_len) {
8667 bio = orig_bio;
8668 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8669 goto submit;
8670 }
8671
8672 /* async crcs make it difficult to collect full stripe writes. */
8673 if (btrfs_data_alloc_profile(fs_info) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8674 async_submit = 0;
8675 else
8676 async_submit = 1;
8677
8678 /* bio split */
8679 ASSERT(map_length <= INT_MAX);
8680 atomic_inc(&dip->pending_bios);
8681 do {
8682 clone_len = min_t(int, submit_len, map_length);
8683
8684 /*
8685 * This will never fail as it's passing GFP_NOFS and
8686 * the allocation is backed by btrfs_bioset.
8687 */
8688 bio = btrfs_bio_clone_partial(orig_bio, clone_offset,
8689 clone_len);
8690 bio->bi_private = dip;
8691 bio->bi_end_io = btrfs_end_dio_bio;
8692 btrfs_io_bio(bio)->logical = file_offset;
8693
8694 ASSERT(submit_len >= clone_len);
8695 submit_len -= clone_len;
8696 if (submit_len == 0)
8697 break;
8698
8699 /*
8700 * Increase the count before we submit the bio so we know
8701 * the end IO handler won't happen before we increase the
8702 * count. Otherwise, the dip might get freed before we're
8703 * done setting it up.
8704 */
8705 atomic_inc(&dip->pending_bios);
8706
8707 status = __btrfs_submit_dio_bio(bio, inode, file_offset,
8708 async_submit);
8709 if (status) {
8710 bio_put(bio);
8711 atomic_dec(&dip->pending_bios);
8712 goto out_err;
8713 }
8714
8715 clone_offset += clone_len;
8716 start_sector += clone_len >> 9;
8717 file_offset += clone_len;
8718
8719 map_length = submit_len;
8720 ret = btrfs_map_block(fs_info, btrfs_op(orig_bio),
8721 start_sector << 9, &map_length, NULL, 0);
8722 if (ret)
8723 goto out_err;
8724 } while (submit_len > 0);
8725
8726 submit:
8727 status = __btrfs_submit_dio_bio(bio, inode, file_offset, async_submit);
8728 if (!status)
8729 return 0;
8730
8731 bio_put(bio);
8732 out_err:
8733 dip->errors = 1;
8734 /*
8735 * before the atomic variable goes to zero, we must
8736 * make sure dip->errors is perceived to be set.
8737 */
8738 smp_mb__before_atomic();
8739 if (atomic_dec_and_test(&dip->pending_bios))
8740 bio_io_error(dip->orig_bio);
8741
8742 /* bio_end_io() will handle error, so we needn't return it */
8743 return 0;
8744 }
8745
8746 static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
8747 loff_t file_offset)
8748 {
8749 struct btrfs_dio_private *dip = NULL;
8750 struct bio *bio = NULL;
8751 struct btrfs_io_bio *io_bio;
8752 bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
8753 int ret = 0;
8754
8755 bio = btrfs_bio_clone(dio_bio);
8756
8757 dip = kzalloc(sizeof(*dip), GFP_NOFS);
8758 if (!dip) {
8759 ret = -ENOMEM;
8760 goto free_ordered;
8761 }
8762
8763 dip->private = dio_bio->bi_private;
8764 dip->inode = inode;
8765 dip->logical_offset = file_offset;
8766 dip->bytes = dio_bio->bi_iter.bi_size;
8767 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8768 bio->bi_private = dip;
8769 dip->orig_bio = bio;
8770 dip->dio_bio = dio_bio;
8771 atomic_set(&dip->pending_bios, 0);
8772 io_bio = btrfs_io_bio(bio);
8773 io_bio->logical = file_offset;
8774
8775 if (write) {
8776 bio->bi_end_io = btrfs_endio_direct_write;
8777 } else {
8778 bio->bi_end_io = btrfs_endio_direct_read;
8779 dip->subio_endio = btrfs_subio_endio_read;
8780 }
8781
8782 /*
8783 * Reset the range for unsubmitted ordered extents (to a 0 length range)
8784 * even if we fail to submit a bio, because in such case we do the
8785 * corresponding error handling below and it must not be done a second
8786 * time by btrfs_direct_IO().
8787 */
8788 if (write) {
8789 struct btrfs_dio_data *dio_data = current->journal_info;
8790
8791 dio_data->unsubmitted_oe_range_end = dip->logical_offset +
8792 dip->bytes;
8793 dio_data->unsubmitted_oe_range_start =
8794 dio_data->unsubmitted_oe_range_end;
8795 }
8796
8797 ret = btrfs_submit_direct_hook(dip);
8798 if (!ret)
8799 return;
8800
8801 if (io_bio->end_io)
8802 io_bio->end_io(io_bio, ret);
8803
8804 free_ordered:
8805 /*
8806 * If we arrived here it means we either failed to submit the dip,
8807 * failed to clone the dio_bio, or failed to allocate the
8808 * dip. If we cloned the dio_bio and allocated the dip, we can just
8809 * call bio_endio against our io_bio so that we get proper resource
8810 * cleanup if we fail to submit the dip, otherwise, we must do the
8811 * same as btrfs_endio_direct_[write|read] because we can't call these
8812 * callbacks - they require an allocated dip and a clone of dio_bio.
8813 */
8814 if (bio && dip) {
8815 bio_io_error(bio);
8816 /*
8817 * The end io callbacks free our dip, do the final put on bio
8818 * and all the cleanup and final put for dio_bio (through
8819 * dio_end_io()).
8820 */
8821 dip = NULL;
8822 bio = NULL;
8823 } else {
8824 if (write)
8825 __endio_write_update_ordered(inode,
8826 file_offset,
8827 dio_bio->bi_iter.bi_size,
8828 false);
8829 else
8830 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8831 file_offset + dio_bio->bi_iter.bi_size - 1);
8832
8833 dio_bio->bi_status = BLK_STS_IOERR;
8834 /*
8835 * Releases and cleans up our dio_bio, no need to bio_put()
8836 * nor bio_endio()/bio_io_error() against dio_bio.
8837 */
8838 dio_end_io(dio_bio);
8839 }
8840 if (bio)
8841 bio_put(bio);
8842 kfree(dip);
8843 }
8844
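/*
 * Sanity check a direct IO request: the file offset and the memory segments
 * must all be aligned to the fs sectorsize, and for reads we additionally
 * reject iovecs that reuse the same iov_base, since that would lead to csum
 * errors when the data is read back.
 *
 * For example, with a 4K sectorsize an O_DIRECT read at offset 8192 into a
 * 4096-byte, 4K-aligned buffer passes, while one at offset 8200 does not;
 * btrfs_direct_IO() treats a non-zero return here as "skip direct IO" and
 * lets the generic code fall back to the buffered path.
 */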
8845 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
8846 struct kiocb *iocb,
8847 const struct iov_iter *iter, loff_t offset)
8848 {
8849 int seg;
8850 int i;
8851 unsigned int blocksize_mask = fs_info->sectorsize - 1;
8852 ssize_t retval = -EINVAL;
8853
8854 if (offset & blocksize_mask)
8855 goto out;
8856
8857 if (iov_iter_alignment(iter) & blocksize_mask)
8858 goto out;
8859
8860 /* If this is a write we don't need to check anymore */
8861 if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
8862 return 0;
8863 /*
8864 * Check to make sure we don't have duplicate iov_base's in this
8865 * iovec, if so return EINVAL, otherwise we'll get csum errors
8866 * when reading back.
8867 */
8868 for (seg = 0; seg < iter->nr_segs; seg++) {
8869 for (i = seg + 1; i < iter->nr_segs; i++) {
8870 if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8871 goto out;
8872 }
8873 }
8874 retval = 0;
8875 out:
8876 return retval;
8877 }
8878
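/*
 * Entry point for O_DIRECT reads and writes (the ->direct_IO address space
 * op).  Roughly:
 *
 *   btrfs_direct_IO()
 *     check_direct_IO()                alignment / iovec sanity checks
 *     btrfs_delalloc_reserve_space()   data + metadata reservation (writes)
 *     __blockdev_direct_IO(iocb, inode, ...,
 *                          btrfs_get_blocks_direct, NULL,
 *                          btrfs_submit_direct, flags)
 *     btrfs_delalloc_release_space()   give back what we did not use
 *
 * The btrfs_dio_data stashed in current->journal_info carries the
 * reservation bookkeeping into btrfs_get_blocks_direct(), and any ordered
 * extents left without submitted bios are cleaned up here on error.
 */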
8879 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8880 {
8881 struct file *file = iocb->ki_filp;
8882 struct inode *inode = file->f_mapping->host;
8883 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8884 struct btrfs_dio_data dio_data = { 0 };
8885 struct extent_changeset *data_reserved = NULL;
8886 loff_t offset = iocb->ki_pos;
8887 size_t count = 0;
8888 int flags = 0;
8889 bool wakeup = true;
8890 bool relock = false;
8891 ssize_t ret;
8892
8893 if (check_direct_IO(fs_info, iocb, iter, offset))
8894 return 0;
8895
8896 inode_dio_begin(inode);
8897
8898 /*
8899 * The generic stuff only does filemap_write_and_wait_range, which
8900 * isn't enough if we've written compressed pages to this area, so
8901 * we need to flush the dirty pages again to make absolutely sure
8902 * that any outstanding dirty pages are on disk.
8903 */
8904 count = iov_iter_count(iter);
8905 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8906 &BTRFS_I(inode)->runtime_flags))
8907 filemap_fdatawrite_range(inode->i_mapping, offset,
8908 offset + count - 1);
8909
8910 if (iov_iter_rw(iter) == WRITE) {
8911 /*
8912 * If the write DIO is beyond the EOF, we need to update
8913 * the isize, but it is protected by i_mutex. So we cannot
8914 * unlock the i_mutex in this case.
8915 */
8916 if (offset + count <= inode->i_size) {
8917 dio_data.overwrite = 1;
8918 inode_unlock(inode);
8919 relock = true;
8920 } else if (iocb->ki_flags & IOCB_NOWAIT) {
8921 ret = -EAGAIN;
8922 goto out;
8923 }
8924 ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
8925 offset, count);
8926 if (ret)
8927 goto out;
8928 dio_data.outstanding_extents = count_max_extents(count);
8929
8930 /*
8931 * We need to know how many extents we reserved so that we can
8932 * do the accounting properly if we go over the number we
8933 * originally calculated. Abuse current->journal_info for this.
8934 */
8935 dio_data.reserve = round_up(count,
8936 fs_info->sectorsize);
8937 dio_data.unsubmitted_oe_range_start = (u64)offset;
8938 dio_data.unsubmitted_oe_range_end = (u64)offset;
8939 current->journal_info = &dio_data;
8940 down_read(&BTRFS_I(inode)->dio_sem);
8941 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8942 &BTRFS_I(inode)->runtime_flags)) {
8943 inode_dio_end(inode);
8944 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8945 wakeup = false;
8946 }
8947
8948 ret = __blockdev_direct_IO(iocb, inode,
8949 fs_info->fs_devices->latest_bdev,
8950 iter, btrfs_get_blocks_direct, NULL,
8951 btrfs_submit_direct, flags);
8952 if (iov_iter_rw(iter) == WRITE) {
8953 up_read(&BTRFS_I(inode)->dio_sem);
8954 current->journal_info = NULL;
8955 if (ret < 0 && ret != -EIOCBQUEUED) {
8956 if (dio_data.reserve)
8957 btrfs_delalloc_release_space(inode, data_reserved,
8958 offset, dio_data.reserve);
8959 /*
8960 * On error we might have left some ordered extents
8961 * without submitting corresponding bios for them, so
8962 * clean them up to avoid other tasks getting them
8963 * and waiting for them to complete forever.
8964 */
8965 if (dio_data.unsubmitted_oe_range_start <
8966 dio_data.unsubmitted_oe_range_end)
8967 __endio_write_update_ordered(inode,
8968 dio_data.unsubmitted_oe_range_start,
8969 dio_data.unsubmitted_oe_range_end -
8970 dio_data.unsubmitted_oe_range_start,
8971 false);
8972 } else if (ret >= 0 && (size_t)ret < count)
8973 btrfs_delalloc_release_space(inode, data_reserved,
8974 offset, count - (size_t)ret);
8975 }
8976 out:
8977 if (wakeup)
8978 inode_dio_end(inode);
8979 if (relock)
8980 inode_lock(inode);
8981
8982 extent_changeset_free(data_reserved);
8983 return ret;
8984 }
8985
8986 #define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
8987
8988 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8989 __u64 start, __u64 len)
8990 {
8991 int ret;
8992
8993 ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8994 if (ret)
8995 return ret;
8996
8997 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
8998 }
8999
9000 int btrfs_readpage(struct file *file, struct page *page)
9001 {
9002 struct extent_io_tree *tree;
9003 tree = &BTRFS_I(page->mapping->host)->io_tree;
9004 return extent_read_full_page(tree, page, btrfs_get_extent, 0);
9005 }
9006
9007 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
9008 {
9009 struct extent_io_tree *tree;
9010 struct inode *inode = page->mapping->host;
9011 int ret;
9012
9013 if (current->flags & PF_MEMALLOC) {
9014 redirty_page_for_writepage(wbc, page);
9015 unlock_page(page);
9016 return 0;
9017 }
9018
9019 /*
9020 * If we are under memory pressure we will call this directly from the
9021 * VM, so we need to make sure we have the inode referenced for the
9022 * ordered extent. If not, just return as if we didn't do anything.
9023 */
9024 if (!igrab(inode)) {
9025 redirty_page_for_writepage(wbc, page);
9026 return AOP_WRITEPAGE_ACTIVATE;
9027 }
9028 tree = &BTRFS_I(page->mapping->host)->io_tree;
9029 ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
9030 btrfs_add_delayed_iput(inode);
9031 return ret;
9032 }
9033
9034 static int btrfs_writepages(struct address_space *mapping,
9035 struct writeback_control *wbc)
9036 {
9037 struct extent_io_tree *tree;
9038
9039 tree = &BTRFS_I(mapping->host)->io_tree;
9040 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
9041 }
9042
9043 static int
9044 btrfs_readpages(struct file *file, struct address_space *mapping,
9045 struct list_head *pages, unsigned nr_pages)
9046 {
9047 struct extent_io_tree *tree;
9048 tree = &BTRFS_I(mapping->host)->io_tree;
9049 return extent_readpages(tree, mapping, pages, nr_pages,
9050 btrfs_get_extent);
9051 }
9052 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
9053 {
9054 struct extent_io_tree *tree;
9055 struct extent_map_tree *map;
9056 int ret;
9057
9058 tree = &BTRFS_I(page->mapping->host)->io_tree;
9059 map = &BTRFS_I(page->mapping->host)->extent_tree;
9060 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
9061 if (ret == 1) {
9062 ClearPagePrivate(page);
9063 set_page_private(page, 0);
9064 put_page(page);
9065 }
9066 return ret;
9067 }
9068
9069 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
9070 {
9071 if (PageWriteback(page) || PageDirty(page))
9072 return 0;
9073 return __btrfs_releasepage(page, gfp_flags);
9074 }
9075
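/*
 * Called when a page is being removed from the page cache (truncate,
 * invalidation, etc).  Any ordered extents covering the page are accounted
 * for and possibly finished here since their IO will never start, delalloc
 * and qgroup reservations for the range are released, and the page's
 * private state is finally torn down.
 */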
9076 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
9077 unsigned int length)
9078 {
9079 struct inode *inode = page->mapping->host;
9080 struct extent_io_tree *tree;
9081 struct btrfs_ordered_extent *ordered;
9082 struct extent_state *cached_state = NULL;
9083 u64 page_start = page_offset(page);
9084 u64 page_end = page_start + PAGE_SIZE - 1;
9085 u64 start;
9086 u64 end;
9087 int inode_evicting = inode->i_state & I_FREEING;
9088
9089 /*
9090 * we have the page locked, so new writeback can't start,
9091 * and the dirty bit won't be cleared while we are here.
9092 *
9093 * Wait for IO on this page so that we can safely clear
9094 * the PagePrivate2 bit and do ordered accounting
9095 */
9096 wait_on_page_writeback(page);
9097
9098 tree = &BTRFS_I(inode)->io_tree;
9099 if (offset) {
9100 btrfs_releasepage(page, GFP_NOFS);
9101 return;
9102 }
9103
9104 if (!inode_evicting)
9105 lock_extent_bits(tree, page_start, page_end, &cached_state);
9106 again:
9107 start = page_start;
9108 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
9109 page_end - start + 1);
9110 if (ordered) {
9111 end = min(page_end, ordered->file_offset + ordered->len - 1);
9112 /*
9113 * IO on this page will never be started, so we need
9114 * to account for any ordered extents now
9115 */
9116 if (!inode_evicting)
9117 clear_extent_bit(tree, start, end,
9118 EXTENT_DIRTY | EXTENT_DELALLOC |
9119 EXTENT_DELALLOC_NEW |
9120 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
9121 EXTENT_DEFRAG, 1, 0, &cached_state,
9122 GFP_NOFS);
9123 /*
9124 * whoever cleared the private bit is responsible
9125 * for the finish_ordered_io
9126 */
9127 if (TestClearPagePrivate2(page)) {
9128 struct btrfs_ordered_inode_tree *tree;
9129 u64 new_len;
9130
9131 tree = &BTRFS_I(inode)->ordered_tree;
9132
9133 spin_lock_irq(&tree->lock);
9134 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
9135 new_len = start - ordered->file_offset;
9136 if (new_len < ordered->truncated_len)
9137 ordered->truncated_len = new_len;
9138 spin_unlock_irq(&tree->lock);
9139
9140 if (btrfs_dec_test_ordered_pending(inode, &ordered,
9141 start,
9142 end - start + 1, 1))
9143 btrfs_finish_ordered_io(ordered);
9144 }
9145 btrfs_put_ordered_extent(ordered);
9146 if (!inode_evicting) {
9147 cached_state = NULL;
9148 lock_extent_bits(tree, start, end,
9149 &cached_state);
9150 }
9151
9152 start = end + 1;
9153 if (start < page_end)
9154 goto again;
9155 }
9156
9157 /*
9158 * Qgroup reserved space handler
9159 * Page here will be either
9160 * 1) Already written to disk
9161 * In this case, its reserved space is released from data rsv map
9162 * and will be freed by delayed_ref handler finally.
9163 * So even if we call qgroup_free_data(), it won't decrease reserved
9164 * space.
9165 * 2) Not written to disk
9166 * This means the reserved space should be freed here. However,
9167 * if a truncate invalidates the page (by clearing PageDirty)
9168 * and the page is accounted for while allocating extent
9169 * in btrfs_check_data_free_space(), we let the delayed_ref handler
9170 * free the entire extent.
9171 */
9172 if (PageDirty(page))
9173 btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
9174 if (!inode_evicting) {
9175 clear_extent_bit(tree, page_start, page_end,
9176 EXTENT_LOCKED | EXTENT_DIRTY |
9177 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
9178 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
9179 &cached_state, GFP_NOFS);
9180
9181 __btrfs_releasepage(page, GFP_NOFS);
9182 }
9183
9184 ClearPageChecked(page);
9185 if (PagePrivate(page)) {
9186 ClearPagePrivate(page);
9187 set_page_private(page, 0);
9188 put_page(page);
9189 }
9190 }
9191
9192 /*
9193 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
9194 * called from a page fault handler when a page is first dirtied. Hence we must
9195 * be careful to check for EOF conditions here. We set the page up correctly
9196 * for a written page which means we get ENOSPC checking when writing into
9197 * holes and correct delalloc and unwritten extent mapping on filesystems that
9198 * support these features.
9199 *
9200 * We are not allowed to take the i_mutex here so we have to play games to
9201 * protect against truncate races as the page could now be beyond EOF. Because
9202 * vmtruncate() writes the inode size before removing pages, once we have the
9203 * page lock we can determine safely if the page is beyond EOF. If it is not
9204 * beyond EOF, then the page is guaranteed safe against truncation until we
9205 * unlock the page.
9206 */
9207 int btrfs_page_mkwrite(struct vm_fault *vmf)
9208 {
9209 struct page *page = vmf->page;
9210 struct inode *inode = file_inode(vmf->vma->vm_file);
9211 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9212 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
9213 struct btrfs_ordered_extent *ordered;
9214 struct extent_state *cached_state = NULL;
9215 struct extent_changeset *data_reserved = NULL;
9216 char *kaddr;
9217 unsigned long zero_start;
9218 loff_t size;
9219 int ret;
9220 int reserved = 0;
9221 u64 reserved_space;
9222 u64 page_start;
9223 u64 page_end;
9224 u64 end;
9225
9226 reserved_space = PAGE_SIZE;
9227
9228 sb_start_pagefault(inode->i_sb);
9229 page_start = page_offset(page);
9230 page_end = page_start + PAGE_SIZE - 1;
9231 end = page_end;
9232
9233 /*
9234 * Reserving delalloc space after obtaining the page lock can lead to
9235 * deadlock. For example, if a dirty page is locked by this function
9236 * and the call to btrfs_delalloc_reserve_space() ends up triggering
9237 * dirty page write out, then the btrfs_writepage() function could
9238 * end up waiting indefinitely to get a lock on the page currently
9239 * being processed by btrfs_page_mkwrite() function.
9240 */
9241 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
9242 reserved_space);
9243 if (!ret) {
9244 ret = file_update_time(vmf->vma->vm_file);
9245 reserved = 1;
9246 }
9247 if (ret) {
9248 if (ret == -ENOMEM)
9249 ret = VM_FAULT_OOM;
9250 else /* -ENOSPC, -EIO, etc */
9251 ret = VM_FAULT_SIGBUS;
9252 if (reserved)
9253 goto out;
9254 goto out_noreserve;
9255 }
9256
9257 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
9258 again:
9259 lock_page(page);
9260 size = i_size_read(inode);
9261
9262 if ((page->mapping != inode->i_mapping) ||
9263 (page_start >= size)) {
9264 /* page got truncated out from underneath us */
9265 goto out_unlock;
9266 }
9267 wait_on_page_writeback(page);
9268
9269 lock_extent_bits(io_tree, page_start, page_end, &cached_state);
9270 set_page_extent_mapped(page);
9271
9272 /*
9273 * we can't set the delalloc bits if there are pending ordered
9274 * extents. Drop our locks and wait for them to finish
9275 */
9276 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
9277 PAGE_SIZE);
9278 if (ordered) {
9279 unlock_extent_cached(io_tree, page_start, page_end,
9280 &cached_state, GFP_NOFS);
9281 unlock_page(page);
9282 btrfs_start_ordered_extent(inode, ordered, 1);
9283 btrfs_put_ordered_extent(ordered);
9284 goto again;
9285 }
9286
9287 if (page->index == ((size - 1) >> PAGE_SHIFT)) {
9288 reserved_space = round_up(size - page_start,
9289 fs_info->sectorsize);
9290 if (reserved_space < PAGE_SIZE) {
9291 end = page_start + reserved_space - 1;
9292 spin_lock(&BTRFS_I(inode)->lock);
9293 BTRFS_I(inode)->outstanding_extents++;
9294 spin_unlock(&BTRFS_I(inode)->lock);
9295 btrfs_delalloc_release_space(inode, data_reserved,
9296 page_start, PAGE_SIZE - reserved_space);
9297 }
9298 }
9299
9300 /*
9301 * page_mkwrite gets called when the page is firstly dirtied after it's
9302 * faulted in, but write(2) could also dirty a page and set delalloc
9303 * bits, thus in this case for space account reason, we still need to
9304 * clear any delalloc bits within this page range since we have to
9305 * reserve data&meta space before lock_page() (see above comments).
9306 */
9307 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
9308 EXTENT_DIRTY | EXTENT_DELALLOC |
9309 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
9310 0, 0, &cached_state, GFP_NOFS);
9311
9312 ret = btrfs_set_extent_delalloc(inode, page_start, end,
9313 &cached_state, 0);
9314 if (ret) {
9315 unlock_extent_cached(io_tree, page_start, page_end,
9316 &cached_state, GFP_NOFS);
9317 ret = VM_FAULT_SIGBUS;
9318 goto out_unlock;
9319 }
9320 ret = 0;
9321
9322 /* page is wholly or partially inside EOF */
9323 if (page_start + PAGE_SIZE > size)
9324 zero_start = size & ~PAGE_MASK;
9325 else
9326 zero_start = PAGE_SIZE;
9327
9328 if (zero_start != PAGE_SIZE) {
9329 kaddr = kmap(page);
9330 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
9331 flush_dcache_page(page);
9332 kunmap(page);
9333 }
9334 ClearPageChecked(page);
9335 set_page_dirty(page);
9336 SetPageUptodate(page);
9337
9338 BTRFS_I(inode)->last_trans = fs_info->generation;
9339 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
9340 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
9341
9342 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
9343
9344 out_unlock:
9345 if (!ret) {
9346 sb_end_pagefault(inode->i_sb);
9347 extent_changeset_free(data_reserved);
9348 return VM_FAULT_LOCKED;
9349 }
9350 unlock_page(page);
9351 out:
9352 btrfs_delalloc_release_space(inode, data_reserved, page_start,
9353 reserved_space);
9354 out_noreserve:
9355 sb_end_pagefault(inode->i_sb);
9356 extent_changeset_free(data_reserved);
9357 return ret;
9358 }
9359
9360 static int btrfs_truncate(struct inode *inode)
9361 {
9362 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9363 struct btrfs_root *root = BTRFS_I(inode)->root;
9364 struct btrfs_block_rsv *rsv;
9365 int ret = 0;
9366 int err = 0;
9367 struct btrfs_trans_handle *trans;
9368 u64 mask = fs_info->sectorsize - 1;
9369 u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
9370
9371 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
9372 (u64)-1);
9373 if (ret)
9374 return ret;
9375
9376 /*
9377 * Yes ladies and gentlemen, this is indeed ugly. The fact is we have
9378 * 3 things going on here
9379 *
9380 * 1) We need to reserve space for our orphan item and the space to
9381 * delete our orphan item. Lord knows we don't want to have a dangling
9382 * orphan item because we didn't reserve space to remove it.
9383 *
9384 * 2) We need to reserve space to update our inode.
9385 *
9386 * 3) We need to have something to cache all the space that is going to
9387 * be freed up by the truncate operation, but also have some slack
9388 * space reserved in case it uses space during the truncate (thank you
9389 * very much snapshotting).
9390 *
9391 * And we need these to all be separate. The fact is we can use a lot of
9392 * space doing the truncate, and we have no earthly idea how much space
9393 * we will use, so we need the truncate reservation to be separate so it
9394 * doesn't end up using space reserved for updating the inode or
9395 * removing the orphan item. We also need to be able to stop the
9396 * transaction and start a new one, which means we need to be able to
9397 * update the inode several times, and we have no way of knowing how
9398 * many times that will be, so we can't just reserve 1 item for the
9399 * entirety of the operation, so that has to be done separately as well.
9400 * Then there is the orphan item, which does indeed need to be held on
9401 * to for the whole operation, and we need nobody to touch this reserved
9402 * space except the orphan code.
9403 *
9404 * So that leaves us with
9405 *
9406 * 1) root->orphan_block_rsv - for the orphan deletion.
9407 * 2) rsv - for the truncate reservation, which we will steal from the
9408 * transaction reservation.
9409 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
9410 * updating the inode.
9411 */
9412 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
9413 if (!rsv)
9414 return -ENOMEM;
9415 rsv->size = min_size;
9416 rsv->failfast = 1;
9417
9418 /*
9419 * 1 for the truncate slack space
9420 * 1 for updating the inode.
9421 */
9422 trans = btrfs_start_transaction(root, 2);
9423 if (IS_ERR(trans)) {
9424 err = PTR_ERR(trans);
9425 goto out;
9426 }
9427
9428 /* Migrate the slack space for the truncate to our reserve */
9429 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
9430 min_size, 0);
9431 BUG_ON(ret);
9432
9433 /*
9434 * So if we truncate and then write and fsync we normally would just
9435 * write the extents that changed, which is a problem if we need to
9436 * first truncate that entire inode. So set this flag so we write out
9437 * all of the extents in the inode to the sync log so we're completely
9438 * safe.
9439 */
9440 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
9441 trans->block_rsv = rsv;
9442
9443 while (1) {
9444 ret = btrfs_truncate_inode_items(trans, root, inode,
9445 inode->i_size,
9446 BTRFS_EXTENT_DATA_KEY);
9447 if (ret != -ENOSPC && ret != -EAGAIN) {
9448 err = ret;
9449 break;
9450 }
9451
9452 trans->block_rsv = &fs_info->trans_block_rsv;
9453 ret = btrfs_update_inode(trans, root, inode);
9454 if (ret) {
9455 err = ret;
9456 break;
9457 }
9458
9459 btrfs_end_transaction(trans);
9460 btrfs_btree_balance_dirty(fs_info);
9461
9462 trans = btrfs_start_transaction(root, 2);
9463 if (IS_ERR(trans)) {
9464 ret = err = PTR_ERR(trans);
9465 trans = NULL;
9466 break;
9467 }
9468
9469 btrfs_block_rsv_release(fs_info, rsv, -1);
9470 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
9471 rsv, min_size, 0);
9472 BUG_ON(ret); /* shouldn't happen */
9473 trans->block_rsv = rsv;
9474 }
9475
9476 if (ret == 0 && inode->i_nlink > 0) {
9477 trans->block_rsv = root->orphan_block_rsv;
9478 ret = btrfs_orphan_del(trans, BTRFS_I(inode));
9479 if (ret)
9480 err = ret;
9481 }
9482
9483 if (trans) {
9484 trans->block_rsv = &fs_info->trans_block_rsv;
9485 ret = btrfs_update_inode(trans, root, inode);
9486 if (ret && !err)
9487 err = ret;
9488
9489 ret = btrfs_end_transaction(trans);
9490 btrfs_btree_balance_dirty(fs_info);
9491 }
9492 out:
9493 btrfs_free_block_rsv(fs_info, rsv);
9494
9495 if (ret && !err)
9496 err = ret;
9497
9498 return err;
9499 }
9500
9501 /*
9502 * create a new subvolume directory/inode (helper for the ioctl).
9503 */
9504 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
9505 struct btrfs_root *new_root,
9506 struct btrfs_root *parent_root,
9507 u64 new_dirid)
9508 {
9509 struct inode *inode;
9510 int err;
9511 u64 index = 0;
9512
9513 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
9514 new_dirid, new_dirid,
9515 S_IFDIR | (~current_umask() & S_IRWXUGO),
9516 &index);
9517 if (IS_ERR(inode))
9518 return PTR_ERR(inode);
9519 inode->i_op = &btrfs_dir_inode_operations;
9520 inode->i_fop = &btrfs_dir_file_operations;
9521
9522 set_nlink(inode, 1);
9523 btrfs_i_size_write(BTRFS_I(inode), 0);
9524 unlock_new_inode(inode);
9525
9526 err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
9527 if (err)
9528 btrfs_err(new_root->fs_info,
9529 "error inheriting subvolume %llu properties: %d",
9530 new_root->root_key.objectid, err);
9531
9532 err = btrfs_update_inode(trans, new_root, inode);
9533
9534 iput(inode);
9535 return err;
9536 }
9537
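/*
 * Allocate an in-memory btrfs inode from the slab cache and initialize the
 * btrfs specific runtime state (extent trees, locks, lists and counters).
 * Called by the VFS through the ->alloc_inode super operation; returns NULL
 * on allocation failure.
 */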
9538 struct inode *btrfs_alloc_inode(struct super_block *sb)
9539 {
9540 struct btrfs_inode *ei;
9541 struct inode *inode;
9542
9543 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
9544 if (!ei)
9545 return NULL;
9546
9547 ei->root = NULL;
9548 ei->generation = 0;
9549 ei->last_trans = 0;
9550 ei->last_sub_trans = 0;
9551 ei->logged_trans = 0;
9552 ei->delalloc_bytes = 0;
9553 ei->new_delalloc_bytes = 0;
9554 ei->defrag_bytes = 0;
9555 ei->disk_i_size = 0;
9556 ei->flags = 0;
9557 ei->csum_bytes = 0;
9558 ei->index_cnt = (u64)-1;
9559 ei->dir_index = 0;
9560 ei->last_unlink_trans = 0;
9561 ei->last_link_trans = 0;
9562 ei->last_log_commit = 0;
9563 ei->delayed_iput_count = 0;
9564
9565 spin_lock_init(&ei->lock);
9566 ei->outstanding_extents = 0;
9567 ei->reserved_extents = 0;
9568
9569 ei->runtime_flags = 0;
9570 ei->prop_compress = BTRFS_COMPRESS_NONE;
9571 ei->defrag_compress = BTRFS_COMPRESS_NONE;
9572
9573 ei->delayed_node = NULL;
9574
9575 ei->i_otime.tv_sec = 0;
9576 ei->i_otime.tv_nsec = 0;
9577
9578 inode = &ei->vfs_inode;
9579 extent_map_tree_init(&ei->extent_tree);
9580 extent_io_tree_init(&ei->io_tree, inode);
9581 extent_io_tree_init(&ei->io_failure_tree, inode);
9582 ei->io_tree.track_uptodate = 1;
9583 ei->io_failure_tree.track_uptodate = 1;
9584 atomic_set(&ei->sync_writers, 0);
9585 mutex_init(&ei->log_mutex);
9586 mutex_init(&ei->delalloc_mutex);
9587 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
9588 INIT_LIST_HEAD(&ei->delalloc_inodes);
9589 INIT_LIST_HEAD(&ei->delayed_iput);
9590 RB_CLEAR_NODE(&ei->rb_node);
9591 init_rwsem(&ei->dio_sem);
9592
9593 return inode;
9594 }
9595
9596 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9597 void btrfs_test_destroy_inode(struct inode *inode)
9598 {
9599 btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
9600 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9601 }
9602 #endif
9603
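/* RCU callback that returns the in-memory inode to the btrfs inode slab cache */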
9604 static void btrfs_i_callback(struct rcu_head *head)
9605 {
9606 struct inode *inode = container_of(head, struct inode, i_rcu);
9607 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9608 }
9609
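/*
 * Final teardown of an in-memory inode: warn about leaked reservations,
 * complain if the inode is unexpectedly still on the orphan list, clean up
 * any remaining ordered extents and cached extent maps, then free the inode
 * via RCU.
 */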
9610 void btrfs_destroy_inode(struct inode *inode)
9611 {
9612 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9613 struct btrfs_ordered_extent *ordered;
9614 struct btrfs_root *root = BTRFS_I(inode)->root;
9615
9616 WARN_ON(!hlist_empty(&inode->i_dentry));
9617 WARN_ON(inode->i_data.nrpages);
9618 WARN_ON(BTRFS_I(inode)->outstanding_extents);
9619 WARN_ON(BTRFS_I(inode)->reserved_extents);
9620 WARN_ON(BTRFS_I(inode)->delalloc_bytes);
9621 WARN_ON(BTRFS_I(inode)->new_delalloc_bytes);
9622 WARN_ON(BTRFS_I(inode)->csum_bytes);
9623 WARN_ON(BTRFS_I(inode)->defrag_bytes);
9624
9625 /*
9626 * This can happen when we create an inode, but somebody else also
9627 * created the same inode and we need to destroy the one we already
9628 * created.
9629 */
9630 if (!root)
9631 goto free;
9632
9633 if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
9634 &BTRFS_I(inode)->runtime_flags)) {
9635 btrfs_info(fs_info, "inode %llu still on the orphan list",
9636 btrfs_ino(BTRFS_I(inode)));
9637 atomic_dec(&root->orphan_inodes);
9638 }
9639
9640 while (1) {
9641 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9642 if (!ordered)
9643 break;
9644 else {
9645 btrfs_err(fs_info,
9646 "found ordered extent %llu %llu on inode cleanup",
9647 ordered->file_offset, ordered->len);
9648 btrfs_remove_ordered_extent(inode, ordered);
9649 btrfs_put_ordered_extent(ordered);
9650 btrfs_put_ordered_extent(ordered);
9651 }
9652 }
9653 btrfs_qgroup_check_reserved_leak(inode);
9654 inode_tree_del(inode);
9655 btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
9656 free:
9657 call_rcu(&inode->i_rcu, btrfs_i_callback);
9658 }
9659
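/*
 * Tell the VFS whether the inode can stay in the cache after the last
 * reference is dropped. Always drop it if the root is gone or the
 * snapshot/subvolume is being deleted, otherwise fall back to the generic
 * behaviour.
 */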
9660 int btrfs_drop_inode(struct inode *inode)
9661 {
9662 struct btrfs_root *root = BTRFS_I(inode)->root;
9663
9664 if (root == NULL)
9665 return 1;
9666
9667 /* the snap/subvol tree is being deleted */
9668 if (btrfs_root_refs(&root->root_item) == 0)
9669 return 1;
9670 else
9671 return generic_drop_inode(inode);
9672 }
9673
9674 static void init_once(void *foo)
9675 {
9676 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9677
9678 inode_init_once(&ei->vfs_inode);
9679 }
9680
9681 void btrfs_destroy_cachep(void)
9682 {
9683 /*
9684 * Make sure all delayed RCU-freed inodes are flushed before we
9685 * destroy the caches.
9686 */
9687 rcu_barrier();
9688 kmem_cache_destroy(btrfs_inode_cachep);
9689 kmem_cache_destroy(btrfs_trans_handle_cachep);
9690 kmem_cache_destroy(btrfs_path_cachep);
9691 kmem_cache_destroy(btrfs_free_space_cachep);
9692 }
9693
9694 int btrfs_init_cachep(void)
9695 {
9696 btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9697 sizeof(struct btrfs_inode), 0,
9698 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
9699 init_once);
9700 if (!btrfs_inode_cachep)
9701 goto fail;
9702
9703 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9704 sizeof(struct btrfs_trans_handle), 0,
9705 SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
9706 if (!btrfs_trans_handle_cachep)
9707 goto fail;
9708
9709 btrfs_path_cachep = kmem_cache_create("btrfs_path",
9710 sizeof(struct btrfs_path), 0,
9711 SLAB_MEM_SPREAD, NULL);
9712 if (!btrfs_path_cachep)
9713 goto fail;
9714
9715 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9716 sizeof(struct btrfs_free_space), 0,
9717 SLAB_MEM_SPREAD, NULL);
9718 if (!btrfs_free_space_cachep)
9719 goto fail;
9720
9721 return 0;
9722 fail:
9723 btrfs_destroy_cachep();
9724 return -ENOMEM;
9725 }
9726
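/*
 * Fill in stat data for the inode. On top of the generic attributes this
 * reports the btrfs birth time and inode flags via statx, uses the
 * per-subvolume anonymous device number, and accounts outstanding delalloc
 * in the block count so not-yet-written data is visible to stat callers.
 */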
9727 static int btrfs_getattr(const struct path *path, struct kstat *stat,
9728 u32 request_mask, unsigned int flags)
9729 {
9730 u64 delalloc_bytes;
9731 struct inode *inode = d_inode(path->dentry);
9732 u32 blocksize = inode->i_sb->s_blocksize;
9733 u32 bi_flags = BTRFS_I(inode)->flags;
9734
9735 stat->result_mask |= STATX_BTIME;
9736 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
9737 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
9738 if (bi_flags & BTRFS_INODE_APPEND)
9739 stat->attributes |= STATX_ATTR_APPEND;
9740 if (bi_flags & BTRFS_INODE_COMPRESS)
9741 stat->attributes |= STATX_ATTR_COMPRESSED;
9742 if (bi_flags & BTRFS_INODE_IMMUTABLE)
9743 stat->attributes |= STATX_ATTR_IMMUTABLE;
9744 if (bi_flags & BTRFS_INODE_NODUMP)
9745 stat->attributes |= STATX_ATTR_NODUMP;
9746
9747 stat->attributes_mask |= (STATX_ATTR_APPEND |
9748 STATX_ATTR_COMPRESSED |
9749 STATX_ATTR_IMMUTABLE |
9750 STATX_ATTR_NODUMP);
9751
9752 generic_fillattr(inode, stat);
9753 stat->dev = BTRFS_I(inode)->root->anon_dev;
9754
9755 spin_lock(&BTRFS_I(inode)->lock);
9756 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
9757 spin_unlock(&BTRFS_I(inode)->lock);
9758 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9759 ALIGN(delalloc_bytes, blocksize)) >> 9;
9760 return 0;
9761 }
9762
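/*
 * Implement RENAME_EXCHANGE: atomically swap the two dentries. Both names
 * are unlinked and re-added under the opposite parent inside a single
 * transaction, and the tree log is either pinned (for plain inodes) or
 * forced to a full commit (when a subvolume is involved).
 */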
9763 static int btrfs_rename_exchange(struct inode *old_dir,
9764 struct dentry *old_dentry,
9765 struct inode *new_dir,
9766 struct dentry *new_dentry)
9767 {
9768 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9769 struct btrfs_trans_handle *trans;
9770 struct btrfs_root *root = BTRFS_I(old_dir)->root;
9771 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9772 struct inode *new_inode = new_dentry->d_inode;
9773 struct inode *old_inode = old_dentry->d_inode;
9774 struct timespec ctime = current_time(old_inode);
9775 struct dentry *parent;
9776 u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9777 u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
9778 u64 old_idx = 0;
9779 u64 new_idx = 0;
9780 u64 root_objectid;
9781 int ret;
9782 int ret2;
9783 bool root_log_pinned = false;
9784 bool dest_log_pinned = false;
9785
9786 /* we only allow renaming a subvolume link between subvolumes */
9787 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9788 return -EXDEV;
9789
9790 /* close the race window with snapshot create/destroy ioctl */
9791 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9792 down_read(&fs_info->subvol_sem);
9793 if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9794 down_read(&fs_info->subvol_sem);
9795
9796 /*
9797 * We want to reserve the absolute worst case amount of items. So if
9798 * both inodes are subvols and we need to unlink them then that would
9799 * require 4 item modifications, but if they are both normal inodes it
9800 * would require 5 item modifications, so we'll assume they're normal
9801 * inodes. So 5 * 2 is 10, plus 2 for the new links, so 12 total items
9802 * should cover the worst case number of items we'll modify.
9803 */
9804 trans = btrfs_start_transaction(root, 12);
9805 if (IS_ERR(trans)) {
9806 ret = PTR_ERR(trans);
9807 goto out_notrans;
9808 }
9809
9810 /*
9811 * We need to find a free sequence number both in the source and
9812 * in the destination directory for the exchange.
9813 */
9814 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
9815 if (ret)
9816 goto out_fail;
9817 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
9818 if (ret)
9819 goto out_fail;
9820
9821 BTRFS_I(old_inode)->dir_index = 0ULL;
9822 BTRFS_I(new_inode)->dir_index = 0ULL;
9823
9824 /* Reference for the source. */
9825 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9826 /* force full log commit if subvolume involved. */
9827 btrfs_set_log_full_commit(fs_info, trans);
9828 } else {
9829 btrfs_pin_log_trans(root);
9830 root_log_pinned = true;
9831 ret = btrfs_insert_inode_ref(trans, dest,
9832 new_dentry->d_name.name,
9833 new_dentry->d_name.len,
9834 old_ino,
9835 btrfs_ino(BTRFS_I(new_dir)),
9836 old_idx);
9837 if (ret)
9838 goto out_fail;
9839 }
9840
9841 /* And now for the dest. */
9842 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9843 /* force full log commit if subvolume involved. */
9844 btrfs_set_log_full_commit(fs_info, trans);
9845 } else {
9846 btrfs_pin_log_trans(dest);
9847 dest_log_pinned = true;
9848 ret = btrfs_insert_inode_ref(trans, root,
9849 old_dentry->d_name.name,
9850 old_dentry->d_name.len,
9851 new_ino,
9852 btrfs_ino(BTRFS_I(old_dir)),
9853 new_idx);
9854 if (ret)
9855 goto out_fail;
9856 }
9857
9858 /* Update inode version and ctime/mtime. */
9859 inode_inc_iversion(old_dir);
9860 inode_inc_iversion(new_dir);
9861 inode_inc_iversion(old_inode);
9862 inode_inc_iversion(new_inode);
9863 old_dir->i_ctime = old_dir->i_mtime = ctime;
9864 new_dir->i_ctime = new_dir->i_mtime = ctime;
9865 old_inode->i_ctime = ctime;
9866 new_inode->i_ctime = ctime;
9867
9868 if (old_dentry->d_parent != new_dentry->d_parent) {
9869 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9870 BTRFS_I(old_inode), 1);
9871 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
9872 BTRFS_I(new_inode), 1);
9873 }
9874
9875 /* src is a subvolume */
9876 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9877 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9878 ret = btrfs_unlink_subvol(trans, root, old_dir,
9879 root_objectid,
9880 old_dentry->d_name.name,
9881 old_dentry->d_name.len);
9882 } else { /* src is an inode */
9883 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9884 BTRFS_I(old_dentry->d_inode),
9885 old_dentry->d_name.name,
9886 old_dentry->d_name.len);
9887 if (!ret)
9888 ret = btrfs_update_inode(trans, root, old_inode);
9889 }
9890 if (ret) {
9891 btrfs_abort_transaction(trans, ret);
9892 goto out_fail;
9893 }
9894
9895 /* dest is a subvolume */
9896 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9897 root_objectid = BTRFS_I(new_inode)->root->root_key.objectid;
9898 ret = btrfs_unlink_subvol(trans, dest, new_dir,
9899 root_objectid,
9900 new_dentry->d_name.name,
9901 new_dentry->d_name.len);
9902 } else { /* dest is an inode */
9903 ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
9904 BTRFS_I(new_dentry->d_inode),
9905 new_dentry->d_name.name,
9906 new_dentry->d_name.len);
9907 if (!ret)
9908 ret = btrfs_update_inode(trans, dest, new_inode);
9909 }
9910 if (ret) {
9911 btrfs_abort_transaction(trans, ret);
9912 goto out_fail;
9913 }
9914
9915 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9916 new_dentry->d_name.name,
9917 new_dentry->d_name.len, 0, old_idx);
9918 if (ret) {
9919 btrfs_abort_transaction(trans, ret);
9920 goto out_fail;
9921 }
9922
9923 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
9924 old_dentry->d_name.name,
9925 old_dentry->d_name.len, 0, new_idx);
9926 if (ret) {
9927 btrfs_abort_transaction(trans, ret);
9928 goto out_fail;
9929 }
9930
9931 if (old_inode->i_nlink == 1)
9932 BTRFS_I(old_inode)->dir_index = old_idx;
9933 if (new_inode->i_nlink == 1)
9934 BTRFS_I(new_inode)->dir_index = new_idx;
9935
9936 if (root_log_pinned) {
9937 parent = new_dentry->d_parent;
9938 btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
9939 parent);
9940 btrfs_end_log_trans(root);
9941 root_log_pinned = false;
9942 }
9943 if (dest_log_pinned) {
9944 parent = old_dentry->d_parent;
9945 btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
9946 parent);
9947 btrfs_end_log_trans(dest);
9948 dest_log_pinned = false;
9949 }
9950 out_fail:
9951 /*
9952 * If we have pinned a log and an error happened, we unpin the log so
9953 * that tasks trying to sync it can proceed, and force them to fall back
9954 * to a transaction commit if the log currently contains any of the
9955 * inodes involved in this rename operation (to ensure we do not persist
9956 * a log with an inconsistent state for any of these inodes, which could
9957 * lead to inconsistencies when replayed). If the transaction was
9958 * aborted, the abort reason is propagated to userspace when attempting
9959 * to commit the transaction. If the log does not contain any of these
9960 * inodes, we allow the tasks to sync it.
9961 */
9962 if (ret && (root_log_pinned || dest_log_pinned)) {
9963 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
9964 btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
9965 btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
9966 (new_inode &&
9967 btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
9968 btrfs_set_log_full_commit(fs_info, trans);
9969
9970 if (root_log_pinned) {
9971 btrfs_end_log_trans(root);
9972 root_log_pinned = false;
9973 }
9974 if (dest_log_pinned) {
9975 btrfs_end_log_trans(dest);
9976 dest_log_pinned = false;
9977 }
9978 }
9979 ret2 = btrfs_end_transaction(trans);
9980 ret = ret ? ret : ret2;
9981 out_notrans:
9982 if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9983 up_read(&fs_info->subvol_sem);
9984 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9985 up_read(&fs_info->subvol_sem);
9986
9987 return ret;
9988 }
9989
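/*
 * Create the whiteout inode used by RENAME_WHITEOUT: a character device
 * with device number WHITEOUT_DEV (0, 0) that takes the place of the old
 * name.
 */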
9990 static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
9991 struct btrfs_root *root,
9992 struct inode *dir,
9993 struct dentry *dentry)
9994 {
9995 int ret;
9996 struct inode *inode;
9997 u64 objectid;
9998 u64 index;
9999
10000 ret = btrfs_find_free_ino(root, &objectid);
10001 if (ret)
10002 return ret;
10003
10004 inode = btrfs_new_inode(trans, root, dir,
10005 dentry->d_name.name,
10006 dentry->d_name.len,
10007 btrfs_ino(BTRFS_I(dir)),
10008 objectid,
10009 S_IFCHR | WHITEOUT_MODE,
10010 &index);
10011
10012 if (IS_ERR(inode)) {
10013 ret = PTR_ERR(inode);
10014 return ret;
10015 }
10016
10017 inode->i_op = &btrfs_special_inode_operations;
10018 init_special_inode(inode, inode->i_mode,
10019 WHITEOUT_DEV);
10020
10021 ret = btrfs_init_inode_security(trans, inode, dir,
10022 &dentry->d_name);
10023 if (ret)
10024 goto out;
10025
10026 ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
10027 BTRFS_I(inode), 0, index);
10028 if (ret)
10029 goto out;
10030
10031 ret = btrfs_update_inode(trans, root, inode);
10032 out:
10033 unlock_new_inode(inode);
10034 if (ret)
10035 inode_dec_link_count(inode);
10036 iput(inode);
10037
10038 return ret;
10039 }
10040
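/*
 * Plain rename (optionally with RENAME_WHITEOUT): move old_dentry from
 * old_dir to new_dentry in new_dir inside one transaction, unlinking any
 * existing target and logging the new name when the tree log can be used.
 */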
10041 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
10042 struct inode *new_dir, struct dentry *new_dentry,
10043 unsigned int flags)
10044 {
10045 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
10046 struct btrfs_trans_handle *trans;
10047 unsigned int trans_num_items;
10048 struct btrfs_root *root = BTRFS_I(old_dir)->root;
10049 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
10050 struct inode *new_inode = d_inode(new_dentry);
10051 struct inode *old_inode = d_inode(old_dentry);
10052 u64 index = 0;
10053 u64 root_objectid;
10054 int ret;
10055 u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
10056 bool log_pinned = false;
10057
10058 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
10059 return -EPERM;
10060
10061 /* we only allow renaming a subvolume link between subvolumes */
10062 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
10063 return -EXDEV;
10064
10065 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
10066 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
10067 return -ENOTEMPTY;
10068
10069 if (S_ISDIR(old_inode->i_mode) && new_inode &&
10070 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
10071 return -ENOTEMPTY;
10072
10073
10074 /* check for collisions, even if the name isn't there */
10075 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
10076 new_dentry->d_name.name,
10077 new_dentry->d_name.len);
10078
10079 if (ret) {
10080 if (ret == -EEXIST) {
10081 /* we shouldn't get
10082 * -EEXIST without a new_inode */
10083 if (WARN_ON(!new_inode)) {
10084 return ret;
10085 }
10086 } else {
10087 /* maybe -EOVERFLOW */
10088 return ret;
10089 }
10090 }
10091 ret = 0;
10092
10093 /*
10094 * we're using rename to replace one file with another. Start IO on it
10095 * now so we don't add too much work to the end of the transaction
10096 */
10097 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
10098 filemap_flush(old_inode->i_mapping);
10099
10100 /* close the race window with snapshot create/destroy ioctl */
10101 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
10102 down_read(&fs_info->subvol_sem);
10103 /*
10104 * We want to reserve the absolute worst case amount of items. So if
10105 * both inodes are subvols and we need to unlink them then that would
10106 * require 4 item modifications, but if they are both normal inodes it
10107 * would require 5 item modifications, so we'll assume they are normal
10108 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
10109 * should cover the worst case number of items we'll modify.
10110 * If our rename has the whiteout flag, we need 5 more units for the
10111 * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item
10112 * when selinux is enabled).
10113 */
10114 trans_num_items = 11;
10115 if (flags & RENAME_WHITEOUT)
10116 trans_num_items += 5;
10117 trans = btrfs_start_transaction(root, trans_num_items);
10118 if (IS_ERR(trans)) {
10119 ret = PTR_ERR(trans);
10120 goto out_notrans;
10121 }
10122
10123 if (dest != root)
10124 btrfs_record_root_in_trans(trans, dest);
10125
10126 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
10127 if (ret)
10128 goto out_fail;
10129
10130 BTRFS_I(old_inode)->dir_index = 0ULL;
10131 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
10132 /* force full log commit if subvolume involved. */
10133 btrfs_set_log_full_commit(fs_info, trans);
10134 } else {
10135 btrfs_pin_log_trans(root);
10136 log_pinned = true;
10137 ret = btrfs_insert_inode_ref(trans, dest,
10138 new_dentry->d_name.name,
10139 new_dentry->d_name.len,
10140 old_ino,
10141 btrfs_ino(BTRFS_I(new_dir)), index);
10142 if (ret)
10143 goto out_fail;
10144 }
10145
10146 inode_inc_iversion(old_dir);
10147 inode_inc_iversion(new_dir);
10148 inode_inc_iversion(old_inode);
10149 old_dir->i_ctime = old_dir->i_mtime =
10150 new_dir->i_ctime = new_dir->i_mtime =
10151 old_inode->i_ctime = current_time(old_dir);
10152
10153 if (old_dentry->d_parent != new_dentry->d_parent)
10154 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
10155 BTRFS_I(old_inode), 1);
10156
10157 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
10158 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
10159 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
10160 old_dentry->d_name.name,
10161 old_dentry->d_name.len);
10162 } else {
10163 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
10164 BTRFS_I(d_inode(old_dentry)),
10165 old_dentry->d_name.name,
10166 old_dentry->d_name.len);
10167 if (!ret)
10168 ret = btrfs_update_inode(trans, root, old_inode);
10169 }
10170 if (ret) {
10171 btrfs_abort_transaction(trans, ret);
10172 goto out_fail;
10173 }
10174
10175 if (new_inode) {
10176 inode_inc_iversion(new_inode);
10177 new_inode->i_ctime = current_time(new_inode);
10178 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
10179 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
10180 root_objectid = BTRFS_I(new_inode)->location.objectid;
10181 ret = btrfs_unlink_subvol(trans, dest, new_dir,
10182 root_objectid,
10183 new_dentry->d_name.name,
10184 new_dentry->d_name.len);
10185 BUG_ON(new_inode->i_nlink == 0);
10186 } else {
10187 ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
10188 BTRFS_I(d_inode(new_dentry)),
10189 new_dentry->d_name.name,
10190 new_dentry->d_name.len);
10191 }
10192 if (!ret && new_inode->i_nlink == 0)
10193 ret = btrfs_orphan_add(trans,
10194 BTRFS_I(d_inode(new_dentry)));
10195 if (ret) {
10196 btrfs_abort_transaction(trans, ret);
10197 goto out_fail;
10198 }
10199 }
10200
10201 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
10202 new_dentry->d_name.name,
10203 new_dentry->d_name.len, 0, index);
10204 if (ret) {
10205 btrfs_abort_transaction(trans, ret);
10206 goto out_fail;
10207 }
10208
10209 if (old_inode->i_nlink == 1)
10210 BTRFS_I(old_inode)->dir_index = index;
10211
10212 if (log_pinned) {
10213 struct dentry *parent = new_dentry->d_parent;
10214
10215 btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
10216 parent);
10217 btrfs_end_log_trans(root);
10218 log_pinned = false;
10219 }
10220
10221 if (flags & RENAME_WHITEOUT) {
10222 ret = btrfs_whiteout_for_rename(trans, root, old_dir,
10223 old_dentry);
10224
10225 if (ret) {
10226 btrfs_abort_transaction(trans, ret);
10227 goto out_fail;
10228 }
10229 }
10230 out_fail:
10231 /*
10232 * If we have pinned the log and an error happened, we unpin the log so
10233 * that tasks trying to sync it can proceed, and force them to fall back
10234 * to a transaction commit if the log currently contains any of the
10235 * inodes involved in this rename operation (to ensure we do not persist
10236 * a log with an inconsistent state for any of these inodes, which could
10237 * lead to inconsistencies when replayed). If the transaction was
10238 * aborted, the abort reason is propagated to userspace when attempting
10239 * to commit the transaction. If the log does not contain any of these
10240 * inodes, we allow the tasks to sync it.
10241 */
10242 if (ret && log_pinned) {
10243 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
10244 btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
10245 btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
10246 (new_inode &&
10247 btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
10248 btrfs_set_log_full_commit(fs_info, trans);
10249
10250 btrfs_end_log_trans(root);
10251 log_pinned = false;
10252 }
10253 btrfs_end_transaction(trans);
10254 out_notrans:
10255 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
10256 up_read(&fs_info->subvol_sem);
10257
10258 return ret;
10259 }
10260
10261 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
10262 struct inode *new_dir, struct dentry *new_dentry,
10263 unsigned int flags)
10264 {
10265 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
10266 return -EINVAL;
10267
10268 if (flags & RENAME_EXCHANGE)
10269 return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
10270 new_dentry);
10271
10272 return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
10273 }
10274
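/*
 * Work item callback: flush the dirty pages of one delalloc inode and drop
 * the inode reference taken when the work was queued (possibly via a
 * delayed iput).
 */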
10275 static void btrfs_run_delalloc_work(struct btrfs_work *work)
10276 {
10277 struct btrfs_delalloc_work *delalloc_work;
10278 struct inode *inode;
10279
10280 delalloc_work = container_of(work, struct btrfs_delalloc_work,
10281 work);
10282 inode = delalloc_work->inode;
10283 filemap_flush(inode->i_mapping);
10284 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
10285 &BTRFS_I(inode)->runtime_flags))
10286 filemap_flush(inode->i_mapping);
10287
10288 if (delalloc_work->delay_iput)
10289 btrfs_add_delayed_iput(inode);
10290 else
10291 iput(inode);
10292 complete(&delalloc_work->completion);
10293 }
10294
10295 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
10296 int delay_iput)
10297 {
10298 struct btrfs_delalloc_work *work;
10299
10300 work = kmalloc(sizeof(*work), GFP_NOFS);
10301 if (!work)
10302 return NULL;
10303
10304 init_completion(&work->completion);
10305 INIT_LIST_HEAD(&work->list);
10306 work->inode = inode;
10307 work->delay_iput = delay_iput;
10308 WARN_ON_ONCE(!inode);
10309 btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
10310 btrfs_run_delalloc_work, NULL, NULL);
10311
10312 return work;
10313 }
10314
10315 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
10316 {
10317 wait_for_completion(&work->completion);
10318 kfree(work);
10319 }
10320
10321 /*
10322 * some fairly slow code that needs optimization. This walks the list
10323 * of all the inodes with pending delalloc and forces them to disk.
10324 */
10325 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
10326 int nr)
10327 {
10328 struct btrfs_inode *binode;
10329 struct inode *inode;
10330 struct btrfs_delalloc_work *work, *next;
10331 struct list_head works;
10332 struct list_head splice;
10333 int ret = 0;
10334
10335 INIT_LIST_HEAD(&works);
10336 INIT_LIST_HEAD(&splice);
10337
10338 mutex_lock(&root->delalloc_mutex);
10339 spin_lock(&root->delalloc_lock);
10340 list_splice_init(&root->delalloc_inodes, &splice);
10341 while (!list_empty(&splice)) {
10342 binode = list_entry(splice.next, struct btrfs_inode,
10343 delalloc_inodes);
10344
10345 list_move_tail(&binode->delalloc_inodes,
10346 &root->delalloc_inodes);
10347 inode = igrab(&binode->vfs_inode);
10348 if (!inode) {
10349 cond_resched_lock(&root->delalloc_lock);
10350 continue;
10351 }
10352 spin_unlock(&root->delalloc_lock);
10353
10354 work = btrfs_alloc_delalloc_work(inode, delay_iput);
10355 if (!work) {
10356 if (delay_iput)
10357 btrfs_add_delayed_iput(inode);
10358 else
10359 iput(inode);
10360 ret = -ENOMEM;
10361 goto out;
10362 }
10363 list_add_tail(&work->list, &works);
10364 btrfs_queue_work(root->fs_info->flush_workers,
10365 &work->work);
10366 ret++;
10367 if (nr != -1 && ret >= nr)
10368 goto out;
10369 cond_resched();
10370 spin_lock(&root->delalloc_lock);
10371 }
10372 spin_unlock(&root->delalloc_lock);
10373
10374 out:
10375 list_for_each_entry_safe(work, next, &works, list) {
10376 list_del_init(&work->list);
10377 btrfs_wait_and_free_delalloc_work(work);
10378 }
10379
10380 if (!list_empty_careful(&splice)) {
10381 spin_lock(&root->delalloc_lock);
10382 list_splice_tail(&splice, &root->delalloc_inodes);
10383 spin_unlock(&root->delalloc_lock);
10384 }
10385 mutex_unlock(&root->delalloc_mutex);
10386 return ret;
10387 }
10388
10389 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
10390 {
10391 struct btrfs_fs_info *fs_info = root->fs_info;
10392 int ret;
10393
10394 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
10395 return -EROFS;
10396
10397 ret = __start_delalloc_inodes(root, delay_iput, -1);
10398 if (ret > 0)
10399 ret = 0;
10400 /*
10401 * the filemap_flush will queue IO into the worker threads, but
10402 * we have to make sure the IO is actually started and that
10403 * ordered extents get created before we return
10404 */
10405 atomic_inc(&fs_info->async_submit_draining);
10406 while (atomic_read(&fs_info->nr_async_submits) ||
10407 atomic_read(&fs_info->async_delalloc_pages)) {
10408 wait_event(fs_info->async_submit_wait,
10409 (atomic_read(&fs_info->nr_async_submits) == 0 &&
10410 atomic_read(&fs_info->async_delalloc_pages) == 0));
10411 }
10412 atomic_dec(&fs_info->async_submit_draining);
10413 return ret;
10414 }
10415
10416 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
10417 int nr)
10418 {
10419 struct btrfs_root *root;
10420 struct list_head splice;
10421 int ret;
10422
10423 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
10424 return -EROFS;
10425
10426 INIT_LIST_HEAD(&splice);
10427
10428 mutex_lock(&fs_info->delalloc_root_mutex);
10429 spin_lock(&fs_info->delalloc_root_lock);
10430 list_splice_init(&fs_info->delalloc_roots, &splice);
10431 while (!list_empty(&splice) && nr) {
10432 root = list_first_entry(&splice, struct btrfs_root,
10433 delalloc_root);
10434 root = btrfs_grab_fs_root(root);
10435 BUG_ON(!root);
10436 list_move_tail(&root->delalloc_root,
10437 &fs_info->delalloc_roots);
10438 spin_unlock(&fs_info->delalloc_root_lock);
10439
10440 ret = __start_delalloc_inodes(root, delay_iput, nr);
10441 btrfs_put_fs_root(root);
10442 if (ret < 0)
10443 goto out;
10444
10445 if (nr != -1) {
10446 nr -= ret;
10447 WARN_ON(nr < 0);
10448 }
10449 spin_lock(&fs_info->delalloc_root_lock);
10450 }
10451 spin_unlock(&fs_info->delalloc_root_lock);
10452
10453 ret = 0;
10454 atomic_inc(&fs_info->async_submit_draining);
10455 while (atomic_read(&fs_info->nr_async_submits) ||
10456 atomic_read(&fs_info->async_delalloc_pages)) {
10457 wait_event(fs_info->async_submit_wait,
10458 (atomic_read(&fs_info->nr_async_submits) == 0 &&
10459 atomic_read(&fs_info->async_delalloc_pages) == 0));
10460 }
10461 atomic_dec(&fs_info->async_submit_draining);
10462 out:
10463 if (!list_empty_careful(&splice)) {
10464 spin_lock(&fs_info->delalloc_root_lock);
10465 list_splice_tail(&splice, &fs_info->delalloc_roots);
10466 spin_unlock(&fs_info->delalloc_root_lock);
10467 }
10468 mutex_unlock(&fs_info->delalloc_root_mutex);
10469 return ret;
10470 }
10471
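/*
 * Create a symlink. The target string is stored as an inline file extent
 * item in the new inode, so its length is limited by the inline data size
 * of a leaf.
 */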
10472 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
10473 const char *symname)
10474 {
10475 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10476 struct btrfs_trans_handle *trans;
10477 struct btrfs_root *root = BTRFS_I(dir)->root;
10478 struct btrfs_path *path;
10479 struct btrfs_key key;
10480 struct inode *inode = NULL;
10481 int err;
10482 int drop_inode = 0;
10483 u64 objectid;
10484 u64 index = 0;
10485 int name_len;
10486 int datasize;
10487 unsigned long ptr;
10488 struct btrfs_file_extent_item *ei;
10489 struct extent_buffer *leaf;
10490
10491 name_len = strlen(symname);
10492 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
10493 return -ENAMETOOLONG;
10494
10495 /*
10496 * 2 items for inode item and ref
10497 * 2 items for dir items
10498 * 1 item for updating parent inode item
10499 * 1 item for the inline extent item
10500 * 1 item for xattr if selinux is on
10501 */
10502 trans = btrfs_start_transaction(root, 7);
10503 if (IS_ERR(trans))
10504 return PTR_ERR(trans);
10505
10506 err = btrfs_find_free_ino(root, &objectid);
10507 if (err)
10508 goto out_unlock;
10509
10510 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
10511 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)),
10512 objectid, S_IFLNK|S_IRWXUGO, &index);
10513 if (IS_ERR(inode)) {
10514 err = PTR_ERR(inode);
10515 goto out_unlock;
10516 }
10517
10518 /*
10519 * If the active LSM wants to access the inode during
10520 * d_instantiate it needs these. Smack checks to see
10521 * if the filesystem supports xattrs by looking at the
10522 * ops vector.
10523 */
10524 inode->i_fop = &btrfs_file_operations;
10525 inode->i_op = &btrfs_file_inode_operations;
10526 inode->i_mapping->a_ops = &btrfs_aops;
10527 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10528
10529 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
10530 if (err)
10531 goto out_unlock_inode;
10532
10533 path = btrfs_alloc_path();
10534 if (!path) {
10535 err = -ENOMEM;
10536 goto out_unlock_inode;
10537 }
10538 key.objectid = btrfs_ino(BTRFS_I(inode));
10539 key.offset = 0;
10540 key.type = BTRFS_EXTENT_DATA_KEY;
10541 datasize = btrfs_file_extent_calc_inline_size(name_len);
10542 err = btrfs_insert_empty_item(trans, root, path, &key,
10543 datasize);
10544 if (err) {
10545 btrfs_free_path(path);
10546 goto out_unlock_inode;
10547 }
10548 leaf = path->nodes[0];
10549 ei = btrfs_item_ptr(leaf, path->slots[0],
10550 struct btrfs_file_extent_item);
10551 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
10552 btrfs_set_file_extent_type(leaf, ei,
10553 BTRFS_FILE_EXTENT_INLINE);
10554 btrfs_set_file_extent_encryption(leaf, ei, 0);
10555 btrfs_set_file_extent_compression(leaf, ei, 0);
10556 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
10557 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
10558
10559 ptr = btrfs_file_extent_inline_start(ei);
10560 write_extent_buffer(leaf, symname, ptr, name_len);
10561 btrfs_mark_buffer_dirty(leaf);
10562 btrfs_free_path(path);
10563
10564 inode->i_op = &btrfs_symlink_inode_operations;
10565 inode_nohighmem(inode);
10566 inode->i_mapping->a_ops = &btrfs_symlink_aops;
10567 inode_set_bytes(inode, name_len);
10568 btrfs_i_size_write(BTRFS_I(inode), name_len);
10569 err = btrfs_update_inode(trans, root, inode);
10570 /*
10571 * Last step: add the directory index items for our symlink inode. We do
10572 * this last to avoid extra cleanup of these indexes if an error happens
10573 * elsewhere above.
10574 */
10575 if (!err)
10576 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
10577 BTRFS_I(inode), 0, index);
10578 if (err) {
10579 drop_inode = 1;
10580 goto out_unlock_inode;
10581 }
10582
10583 d_instantiate_new(dentry, inode);
10584
10585 out_unlock:
10586 btrfs_end_transaction(trans);
10587 if (drop_inode) {
10588 inode_dec_link_count(inode);
10589 iput(inode);
10590 }
10591 btrfs_btree_balance_dirty(fs_info);
10592 return err;
10593
10594 out_unlock_inode:
10595 drop_inode = 1;
10596 unlock_new_inode(inode);
10597 goto out_unlock;
10598 }
10599
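/*
 * Allocate preallocated (unwritten) extents for [start, start + num_bytes)
 * in chunks of at most 256M, inserting a PREALLOC file extent item and a
 * matching extent map for each allocation and updating i_size as needed.
 * If no transaction handle is passed in, a transaction is started and ended
 * around every iteration.
 */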
10600 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
10601 u64 start, u64 num_bytes, u64 min_size,
10602 loff_t actual_len, u64 *alloc_hint,
10603 struct btrfs_trans_handle *trans)
10604 {
10605 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
10606 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
10607 struct extent_map *em;
10608 struct btrfs_root *root = BTRFS_I(inode)->root;
10609 struct btrfs_key ins;
10610 u64 cur_offset = start;
10611 u64 i_size;
10612 u64 cur_bytes;
10613 u64 last_alloc = (u64)-1;
10614 int ret = 0;
10615 bool own_trans = true;
10616 u64 end = start + num_bytes - 1;
10617
10618 if (trans)
10619 own_trans = false;
10620 while (num_bytes > 0) {
10621 if (own_trans) {
10622 trans = btrfs_start_transaction(root, 3);
10623 if (IS_ERR(trans)) {
10624 ret = PTR_ERR(trans);
10625 break;
10626 }
10627 }
10628
10629 cur_bytes = min_t(u64, num_bytes, SZ_256M);
10630 cur_bytes = max(cur_bytes, min_size);
10631 /*
10632 * If we are severely fragmented we could end up with really
10633 * small allocations, so if the allocator is returning small
10634 * chunks, let's make its job easier by only searching for those
10635 * sized chunks.
10636 */
10637 cur_bytes = min(cur_bytes, last_alloc);
10638 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
10639 min_size, 0, *alloc_hint, &ins, 1, 0);
10640 if (ret) {
10641 if (own_trans)
10642 btrfs_end_transaction(trans);
10643 break;
10644 }
10645 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10646
10647 last_alloc = ins.offset;
10648 ret = insert_reserved_file_extent(trans, inode,
10649 cur_offset, ins.objectid,
10650 ins.offset, ins.offset,
10651 ins.offset, 0, 0, 0,
10652 BTRFS_FILE_EXTENT_PREALLOC);
10653 if (ret) {
10654 btrfs_free_reserved_extent(fs_info, ins.objectid,
10655 ins.offset, 0);
10656 btrfs_abort_transaction(trans, ret);
10657 if (own_trans)
10658 btrfs_end_transaction(trans);
10659 break;
10660 }
10661
10662 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10663 cur_offset + ins.offset -1, 0);
10664
10665 em = alloc_extent_map();
10666 if (!em) {
10667 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
10668 &BTRFS_I(inode)->runtime_flags);
10669 goto next;
10670 }
10671
10672 em->start = cur_offset;
10673 em->orig_start = cur_offset;
10674 em->len = ins.offset;
10675 em->block_start = ins.objectid;
10676 em->block_len = ins.offset;
10677 em->orig_block_len = ins.offset;
10678 em->ram_bytes = ins.offset;
10679 em->bdev = fs_info->fs_devices->latest_bdev;
10680 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
10681 em->generation = trans->transid;
10682
10683 while (1) {
10684 write_lock(&em_tree->lock);
10685 ret = add_extent_mapping(em_tree, em, 1);
10686 write_unlock(&em_tree->lock);
10687 if (ret != -EEXIST)
10688 break;
10689 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10690 cur_offset + ins.offset - 1,
10691 0);
10692 }
10693 free_extent_map(em);
10694 next:
10695 num_bytes -= ins.offset;
10696 cur_offset += ins.offset;
10697 *alloc_hint = ins.objectid + ins.offset;
10698
10699 inode_inc_iversion(inode);
10700 inode->i_ctime = current_time(inode);
10701 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
10702 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
10703 (actual_len > inode->i_size) &&
10704 (cur_offset > inode->i_size)) {
10705 if (cur_offset > actual_len)
10706 i_size = actual_len;
10707 else
10708 i_size = cur_offset;
10709 i_size_write(inode, i_size);
10710 btrfs_ordered_update_i_size(inode, i_size, NULL);
10711 }
10712
10713 ret = btrfs_update_inode(trans, root, inode);
10714
10715 if (ret) {
10716 btrfs_abort_transaction(trans, ret);
10717 if (own_trans)
10718 btrfs_end_transaction(trans);
10719 break;
10720 }
10721
10722 if (own_trans)
10723 btrfs_end_transaction(trans);
10724 }
10725 if (cur_offset < end)
10726 btrfs_free_reserved_data_space(inode, NULL, cur_offset,
10727 end - cur_offset + 1);
10728 return ret;
10729 }
10730
10731 int btrfs_prealloc_file_range(struct inode *inode, int mode,
10732 u64 start, u64 num_bytes, u64 min_size,
10733 loff_t actual_len, u64 *alloc_hint)
10734 {
10735 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10736 min_size, actual_len, alloc_hint,
10737 NULL);
10738 }
10739
10740 int btrfs_prealloc_file_range_trans(struct inode *inode,
10741 struct btrfs_trans_handle *trans, int mode,
10742 u64 start, u64 num_bytes, u64 min_size,
10743 loff_t actual_len, u64 *alloc_hint)
10744 {
10745 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10746 min_size, actual_len, alloc_hint, trans);
10747 }
10748
10749 static int btrfs_set_page_dirty(struct page *page)
10750 {
10751 return __set_page_dirty_nobuffers(page);
10752 }
10753
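/*
 * Permission check hook: refuse write access on read-only subvolumes and on
 * inodes carrying the btrfs read-only flag, then defer to the generic
 * permission checks.
 */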
10754 static int btrfs_permission(struct inode *inode, int mask)
10755 {
10756 struct btrfs_root *root = BTRFS_I(inode)->root;
10757 umode_t mode = inode->i_mode;
10758
10759 if (mask & MAY_WRITE &&
10760 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
10761 if (btrfs_root_readonly(root))
10762 return -EROFS;
10763 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
10764 return -EACCES;
10765 }
10766 return generic_permission(inode, mask);
10767 }
10768
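/*
 * O_TMPFILE support: create an unlinked inode and add it to the orphan list
 * so it is cleaned up if we crash before it is either linked into a
 * directory or deleted on the final iput.
 */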
10769 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
10770 {
10771 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10772 struct btrfs_trans_handle *trans;
10773 struct btrfs_root *root = BTRFS_I(dir)->root;
10774 struct inode *inode = NULL;
10775 u64 objectid;
10776 u64 index;
10777 int ret = 0;
10778
10779 /*
10780 * 5 units required for adding orphan entry
10781 */
10782 trans = btrfs_start_transaction(root, 5);
10783 if (IS_ERR(trans))
10784 return PTR_ERR(trans);
10785
10786 ret = btrfs_find_free_ino(root, &objectid);
10787 if (ret)
10788 goto out;
10789
10790 inode = btrfs_new_inode(trans, root, dir, NULL, 0,
10791 btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
10792 if (IS_ERR(inode)) {
10793 ret = PTR_ERR(inode);
10794 inode = NULL;
10795 goto out;
10796 }
10797
10798 inode->i_fop = &btrfs_file_operations;
10799 inode->i_op = &btrfs_file_inode_operations;
10800
10801 inode->i_mapping->a_ops = &btrfs_aops;
10802 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10803
10804 ret = btrfs_init_inode_security(trans, inode, dir, NULL);
10805 if (ret)
10806 goto out_inode;
10807
10808 ret = btrfs_update_inode(trans, root, inode);
10809 if (ret)
10810 goto out_inode;
10811 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
10812 if (ret)
10813 goto out_inode;
10814
10815 /*
10816 * We set the number of links to 0 in btrfs_new_inode(), and here we set
10817 * it to 1 because d_tmpfile() will issue a warning if the count is 0,
10818 * through:
10819 *
10820 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
10821 */
10822 set_nlink(inode, 1);
10823 unlock_new_inode(inode);
10824 d_tmpfile(dentry, inode);
10825 mark_inode_dirty(inode);
10826
10827 out:
10828 btrfs_end_transaction(trans);
10829 if (ret)
10830 iput(inode);
10831 btrfs_balance_delayed_items(fs_info);
10832 btrfs_btree_balance_dirty(fs_info);
10833 return ret;
10834
10835 out_inode:
10836 unlock_new_inode(inode);
10837 goto out;
10838
10839 }
10840
10841 __attribute__((const))
10842 static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
10843 {
10844 return -EAGAIN;
10845 }
10846
10847 static struct btrfs_fs_info *iotree_fs_info(void *private_data)
10848 {
10849 struct inode *inode = private_data;
10850 return btrfs_sb(inode->i_sb);
10851 }
10852
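/*
 * Debug helper: print a ratelimited message about suspicious extent io
 * ranges whose end offset is even, i.e. looks like an exclusive end rather
 * than the expected inclusive one.
 */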
10853 static void btrfs_check_extent_io_range(void *private_data, const char *caller,
10854 u64 start, u64 end)
10855 {
10856 struct inode *inode = private_data;
10857 u64 isize;
10858
10859 isize = i_size_read(inode);
10860 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
10861 btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
10862 "%s: ino %llu isize %llu odd range [%llu,%llu]",
10863 caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
10864 }
10865 }
10866
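/*
 * Mark every page in the byte range [start, end] as under writeback. The
 * pages are expected to already be present in the page cache.
 */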
10867 void btrfs_set_range_writeback(void *private_data, u64 start, u64 end)
10868 {
10869 struct inode *inode = private_data;
10870 unsigned long index = start >> PAGE_SHIFT;
10871 unsigned long end_index = end >> PAGE_SHIFT;
10872 struct page *page;
10873
10874 while (index <= end_index) {
10875 page = find_get_page(inode->i_mapping, index);
10876 ASSERT(page); /* Pages should be in the extent_io_tree */
10877 set_page_writeback(page);
10878 put_page(page);
10879 index++;
10880 }
10881 }
10882
10883 static const struct inode_operations btrfs_dir_inode_operations = {
10884 .getattr = btrfs_getattr,
10885 .lookup = btrfs_lookup,
10886 .create = btrfs_create,
10887 .unlink = btrfs_unlink,
10888 .link = btrfs_link,
10889 .mkdir = btrfs_mkdir,
10890 .rmdir = btrfs_rmdir,
10891 .rename = btrfs_rename2,
10892 .symlink = btrfs_symlink,
10893 .setattr = btrfs_setattr,
10894 .mknod = btrfs_mknod,
10895 .listxattr = btrfs_listxattr,
10896 .permission = btrfs_permission,
10897 .get_acl = btrfs_get_acl,
10898 .set_acl = btrfs_set_acl,
10899 .update_time = btrfs_update_time,
10900 .tmpfile = btrfs_tmpfile,
10901 };
10902 static const struct inode_operations btrfs_dir_ro_inode_operations = {
10903 .lookup = btrfs_lookup,
10904 .permission = btrfs_permission,
10905 .update_time = btrfs_update_time,
10906 };
10907
10908 static const struct file_operations btrfs_dir_file_operations = {
10909 .llseek = generic_file_llseek,
10910 .read = generic_read_dir,
10911 .iterate_shared = btrfs_real_readdir,
10912 .open = btrfs_opendir,
10913 .unlocked_ioctl = btrfs_ioctl,
10914 #ifdef CONFIG_COMPAT
10915 .compat_ioctl = btrfs_compat_ioctl,
10916 #endif
10917 .release = btrfs_release_file,
10918 .fsync = btrfs_sync_file,
10919 };
10920
10921 static const struct extent_io_ops btrfs_extent_io_ops = {
10922 /* mandatory callbacks */
10923 .submit_bio_hook = btrfs_submit_bio_hook,
10924 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
10925 .merge_bio_hook = btrfs_merge_bio_hook,
10926 .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
10927 .tree_fs_info = iotree_fs_info,
10928 .set_range_writeback = btrfs_set_range_writeback,
10929
10930 /* optional callbacks */
10931 .fill_delalloc = run_delalloc_range,
10932 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
10933 .writepage_start_hook = btrfs_writepage_start_hook,
10934 .set_bit_hook = btrfs_set_bit_hook,
10935 .clear_bit_hook = btrfs_clear_bit_hook,
10936 .merge_extent_hook = btrfs_merge_extent_hook,
10937 .split_extent_hook = btrfs_split_extent_hook,
10938 .check_extent_io_range = btrfs_check_extent_io_range,
10939 };
10940
10941 /*
10942 * btrfs doesn't support the bmap operation because swapfiles
10943 * use bmap to make a mapping of extents in the file. They assume
10944 * these extents won't change over the life of the file and they
10945 * use the bmap result to do IO directly to the drive.
10946 *
10947 * the btrfs bmap call would return logical addresses that aren't
10948 * suitable for IO and they also will change frequently as COW
10949 * operations happen. So, swapfile + btrfs == corruption.
10950 *
10951 * For now we're avoiding this by dropping bmap.
10952 */
10953 static const struct address_space_operations btrfs_aops = {
10954 .readpage = btrfs_readpage,
10955 .writepage = btrfs_writepage,
10956 .writepages = btrfs_writepages,
10957 .readpages = btrfs_readpages,
10958 .direct_IO = btrfs_direct_IO,
10959 .invalidatepage = btrfs_invalidatepage,
10960 .releasepage = btrfs_releasepage,
10961 .set_page_dirty = btrfs_set_page_dirty,
10962 .error_remove_page = generic_error_remove_page,
10963 };
10964
10965 static const struct address_space_operations btrfs_symlink_aops = {
10966 .readpage = btrfs_readpage,
10967 .writepage = btrfs_writepage,
10968 .invalidatepage = btrfs_invalidatepage,
10969 .releasepage = btrfs_releasepage,
10970 };
10971
10972 static const struct inode_operations btrfs_file_inode_operations = {
10973 .getattr = btrfs_getattr,
10974 .setattr = btrfs_setattr,
10975 .listxattr = btrfs_listxattr,
10976 .permission = btrfs_permission,
10977 .fiemap = btrfs_fiemap,
10978 .get_acl = btrfs_get_acl,
10979 .set_acl = btrfs_set_acl,
10980 .update_time = btrfs_update_time,
10981 };
10982 static const struct inode_operations btrfs_special_inode_operations = {
10983 .getattr = btrfs_getattr,
10984 .setattr = btrfs_setattr,
10985 .permission = btrfs_permission,
10986 .listxattr = btrfs_listxattr,
10987 .get_acl = btrfs_get_acl,
10988 .set_acl = btrfs_set_acl,
10989 .update_time = btrfs_update_time,
10990 };
10991 static const struct inode_operations btrfs_symlink_inode_operations = {
10992 .get_link = page_get_link,
10993 .getattr = btrfs_getattr,
10994 .setattr = btrfs_setattr,
10995 .permission = btrfs_permission,
10996 .listxattr = btrfs_listxattr,
10997 .update_time = btrfs_update_time,
10998 };
10999
11000 const struct dentry_operations btrfs_dentry_operations = {
11001 .d_delete = btrfs_dentry_delete,
11002 .d_release = btrfs_dentry_release,
11003 };