 		BUG_ON(err == -EEXIST);
 		goto out;
 	}
-
 	state = rb_entry(node, struct extent_state, rb_node);
+hit_next:
 	last_start = state->start;
 	last_end = state->end;
 
 	/*
 	 * | ---- desired range ---- |
 	 * | state |
 	 *
 	 * Just lock what we found and keep going
 	 */
 	if (state->start == start && state->end <= end) {
+		struct rb_node *next_node;
 		set = state->state & bits;
 		if (set && exclusive) {
 			*failed_start = state->start;
 			err = -EEXIST;
 			goto out;
 		}
 		set_state_bits(tree, state, bits);
 		merge_state(tree, state);
 		if (last_end == (u64)-1)
 			goto out;
+
 		start = last_end + 1;
+		if (start < end && prealloc && !need_resched()) {
+			next_node = rb_next(node);
+			if (next_node) {
+				state = rb_entry(next_node, struct extent_state,
+						 rb_node);
+				if (state->start == start)
+					goto hit_next;
+			}
+		}
 		goto search_again;
 	}
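
The hit_next fast path added above skips the rbtree search when consecutive
extent states are exactly adjacent: once a state is fully consumed, rb_next()
is checked and, if the neighbour begins right at the new start, control jumps
straight back to hit_next. It only does so while prealloc is still available
and need_resched() is clear, so the shortcut never blocks on allocation or
hogs the CPU. Below is a userspace sketch of the same walk-forward pattern
(my illustration, not kernel code: the linked list stands in for the rbtree
and every name in it is hypothetical).

#include <stdio.h>
#include <stdint.h>

struct state {
	uint64_t start, end;	/* inclusive range covered by this entry */
	unsigned bits;		/* flag bits set on the range */
	struct state *next;	/* stands in for rb_next() on the rbtree */
};

/* Set @bits on every state covering [start, end], walking forward through
 * adjacent entries instead of re-searching from the root each time. */
static void set_bits(struct state *head, uint64_t start, uint64_t end,
		     unsigned bits)
{
	struct state *s = head;

	/* one search to find the first candidate (O(log n) in the tree) */
	while (s && s->end < start)
		s = s->next;

	/* the "goto hit_next" loop: keep going while states line up */
	while (s && s->start == start && s->start <= end) {
		s->bits |= bits;
		start = s->end + 1;
		s = s->next;
	}
}

int main(void)
{
	struct state c = { 20, 29, 0, NULL };
	struct state b = { 10, 19, 0, &c };
	struct state a = { 0, 9, 0, &b };

	set_bits(&a, 0, 29, 0x1);
	for (struct state *s = &a; s; s = s->next)
		printf("[%llu-%llu] bits=0x%x\n",
		       (unsigned long long)s->start,
		       (unsigned long long)s->end, s->bits);
	return 0;
}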
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 		       gfp_t mask)
 {
 	return set_extent_bit(tree, start, end,
-			      EXTENT_DELALLOC | EXTENT_DIRTY,
+			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
 			      0, NULL, mask);
 }
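
With EXTENT_UPTODATE folded into the mask, set_extent_delalloc now marks a
range delalloc, dirty, and uptodate in a single set_extent_bit pass, which
is what allows the standalone set_extent_uptodate() call to be dropped from
the write path in the next hunk. A minimal sketch of the saving, assuming
each call pays one tree search; the flag values and the set_bits_on_range()
helper are illustrative stand-ins, not the kernel's:

#include <stdio.h>

enum {
	EXTENT_DIRTY	= 1 << 0,
	EXTENT_DELALLOC	= 1 << 1,
	EXTENT_UPTODATE	= 1 << 2,
};

static int searches;	/* counts simulated tree searches */

/* stand-in for set_extent_bit(): one search per call, then OR the bits */
static void set_bits_on_range(unsigned *range_bits, unsigned bits)
{
	searches++;
	*range_bits |= bits;
}

int main(void)
{
	unsigned range = 0;

	/* before: two passes over the tree for the same range */
	set_bits_on_range(&range, EXTENT_DELALLOC | EXTENT_DIRTY);
	set_bits_on_range(&range, EXTENT_UPTODATE);
	printf("two calls: bits=0x%x, searches=%d\n", range, searches);

	/* after: one combined pass sets the same three bits */
	range = 0;
	searches = 0;
	set_bits_on_range(&range,
			  EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE);
	printf("one call:  bits=0x%x, searches=%d\n", range, searches);
	return 0;
}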
 	btrfs_set_trans_block_group(trans, inode);
 	hint_byte = 0;
 
-	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-
 	/* check for reserved extents on each page, we don't want
 	 * to reset the delalloc bit on things that already have
 	 * extents reserved.