* this drops all the extents in the cache that intersect the range
* [start, end]. Existing extents are split as required.
*/
-void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
+void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
int skip_pinned)
{
struct extent_map *em;
struct extent_map *split = NULL;
struct extent_map *split2 = NULL;
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_map_tree *em_tree = &inode->extent_tree;
u64 len = end - start + 1;
u64 gen;
int ret;
int leafs_visited = 0;
if (drop_cache)
- btrfs_drop_extent_cache(inode, start, end - 1, 0);
+ btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);
if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
modify_tree = 0;
hole_em = alloc_extent_map();
if (!hole_em) {
- btrfs_drop_extent_cache(inode, offset, end - 1, 0);
+ btrfs_drop_extent_cache(BTRFS_I(inode), offset, end - 1, 0);
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
} else {
hole_em->generation = trans->transid;
do {
- btrfs_drop_extent_cache(inode, offset, end - 1, 0);
+ btrfs_drop_extent_cache(BTRFS_I(inode), offset,
+ end - 1, 0);
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, hole_em, 1);
write_unlock(&em_tree->lock);
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
btrfs_delalloc_release_metadata(BTRFS_I(inode), end + 1 - start);
- btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
+ btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
out:
/*
* Don't forget to free the reserved space, as for inlined extent
BTRFS_ORDERED_COMPRESSED,
async_extent->compress_type);
if (ret) {
- btrfs_drop_extent_cache(inode, async_extent->start,
+ btrfs_drop_extent_cache(BTRFS_I(inode),
+ async_extent->start,
async_extent->start +
async_extent->ram_size - 1, 0);
goto out_free_reserve;
btrfs_super_total_bytes(fs_info->super_copy));
alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
- btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
+ btrfs_drop_extent_cache(BTRFS_I(inode), start,
+ start + num_bytes - 1, 0);
while (disk_num_bytes > 0) {
unsigned long op;
return ret;
out_drop_extent_cache:
- btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
+ btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
out_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
/* Drop the cache for the part of the extent we didn't write. */
- btrfs_drop_extent_cache(inode, start, end, 0);
+ btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
/*
* If the ordered extent had an IOERR or something else went
*/
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
root == fs_info->tree_root)
- btrfs_drop_extent_cache(inode, ALIGN(new_size,
+ btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
fs_info->sectorsize),
(u64)-1, 0);
hole_size);
if (err)
break;
- btrfs_drop_extent_cache(inode, cur_offset,
+ btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
cur_offset + hole_size - 1, 0);
hole_em = alloc_extent_map();
if (!hole_em) {
write_unlock(&em_tree->lock);
if (err != -EEXIST)
break;
- btrfs_drop_extent_cache(inode, cur_offset,
+ btrfs_drop_extent_cache(BTRFS_I(inode),
+ cur_offset,
cur_offset +
hole_size - 1, 0);
}
if (ret) {
if (em) {
free_extent_map(em);
- btrfs_drop_extent_cache(inode, start,
+ btrfs_drop_extent_cache(BTRFS_I(inode), start,
start + len - 1, 0);
}
em = ERR_PTR(ret);
}
do {
- btrfs_drop_extent_cache(inode, em->start,
+ btrfs_drop_extent_cache(BTRFS_I(inode), em->start,
em->start + em->len - 1, 0);
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 1);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
- btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
+ btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif
}
btrfs_qgroup_check_reserved_leak(inode);
inode_tree_del(inode);
- btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
+ btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
free:
call_rcu(&inode->i_rcu, btrfs_i_callback);
}
break;
}
- btrfs_drop_extent_cache(inode, cur_offset,
+ btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
cur_offset + ins.offset -1, 0);
em = alloc_extent_map();
write_unlock(&em_tree->lock);
if (ret != -EEXIST)
break;
- btrfs_drop_extent_cache(inode, cur_offset,
+ btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
cur_offset + ins.offset - 1,
0);
}
if (!ret)
continue;
- btrfs_drop_extent_cache(inode, key.offset, end,
- 1);
+ btrfs_drop_extent_cache(BTRFS_I(inode),
+ key.offset, end, 1);
unlock_extent(&BTRFS_I(inode)->io_tree,
key.offset, end);
}
/* the lock_extent waits for readpage to complete */
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
- btrfs_drop_extent_cache(inode, start, end, 1);
+ btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
}
return 0;
free_extent_map(em);
break;
}
- btrfs_drop_extent_cache(inode, start, end, 0);
+ btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
}
unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
return ret;
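
For reference, the calling convention this change establishes is sketched below. This is not part of the patch: the prototype matches the new signature in the first hunk, BTRFS_I() is the usual btrfs accessor that maps a VFS struct inode to its containing struct btrfs_inode, and the caller function is a made-up example (the real call sites are the ones converted above).

	/*
	 * Illustrative sketch only, not part of the diff.  It shows how a
	 * caller that still holds a VFS "struct inode *" converts it at the
	 * call site, mirroring e.g. the btrfs_destroy_inode() hunk above.
	 */
	void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start,
				     u64 end, int skip_pinned);

	static void example_drop_whole_file(struct inode *vfs_inode)
	{
		/* drop every cached extent mapping for this inode */
		btrfs_drop_extent_cache(BTRFS_I(vfs_inode), 0, (u64)-1, 0);
	}

The pattern is the same as other btrfs_inode conversions: the helper takes struct btrfs_inode directly, so the BTRFS_I() lookups move out of the function body (note &inode->extent_tree replacing &BTRFS_I(inode)->extent_tree) and into the callers that only have a VFS inode.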