struct extent_map *em;
int ret;
-again:
spin_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
spin_unlock(&em_tree->lock);
- if (em) {
+ if (em)
goto out;
- }
+
em = alloc_extent_map(GFP_NOFS);
if (!em) {
em = ERR_PTR(-ENOMEM);
goto out;
}
spin_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
- spin_unlock(&em_tree->lock);
-
if (ret == -EEXIST) {
free_extent_map(em);
- em = NULL;
- goto again;
+ em = lookup_extent_mapping(em_tree, start, len);
+ if (em)
+ ret = 0;
+ else
+ ret = -EIO;
} else if (ret) {
- em = ERR_PTR(ret);
+ free_extent_map(em);
+ em = NULL;
}
+ spin_unlock(&em_tree->lock);
+
+ if (ret)
+ em = ERR_PTR(ret);
out:
return em;
}
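The rework above trades the old unlock/goto-again retry loop for a single
critical section: when add_extent_mapping returns -EEXIST, the retry lookup
now runs under the same spin_lock as the failed insert, so the mapping that
won the race cannot be removed between the two calls. A minimal sketch of
that insert-or-lookup pattern; the tree/entry types and the insert_entry,
lookup_entry and free_entry helpers are hypothetical, not the btrfs API:

	/* sketch: insert-or-lookup under one lock (hypothetical helpers) */
	struct entry *get_or_insert(struct tree *t, struct entry *new,
				    u64 start, u64 len)
	{
		struct entry *found;
		int ret;

		spin_lock(&t->lock);
		ret = insert_entry(t, new);
		if (ret == -EEXIST) {
			/* lost the race: drop our copy, take the winner */
			free_entry(new);
			found = lookup_entry(t, start, len);
			if (!found)
				found = ERR_PTR(-EIO);
		} else if (ret) {
			free_entry(new);
			found = ERR_PTR(ret);
		} else {
			found = new;
		}
		spin_unlock(&t->lock);
		return found;
	}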
tree = &BTRFS_I(page->mapping->host)->io_tree;
map = &BTRFS_I(page->mapping->host)->extent_tree;
- ret = try_release_extent_mapping(map, tree, page, gfp_flags);
+ ret = try_release_extent_state(map, tree, page, gfp_flags);
if (ret == 1) {
invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
ClearPagePrivate(page);
}
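Switching this call from try_release_extent_mapping to the new
try_release_extent_state means btree (metadata) pages no longer lose their
cached extent_map entries on every releasepage; only the per-page extent
state bits are dropped once nothing in the range is locked or under IO.
Side by side, the intended difference:

	/* old: could also evict cached extent_maps for the page's range */
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	/* new: leaves the extent_map cache alone, drops state bits only */
	ret = try_release_extent_state(map, tree, page, gfp_flags);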
EXPORT_SYMBOL(extent_prepare_write);
+/*
+ * a helper for releasepage: this tests for areas of the page that
+ * are locked or under IO, and drops the related state bits if it is
+ * safe to drop the page.
+ */
+int try_release_extent_state(struct extent_map_tree *map,
+ struct extent_io_tree *tree, struct page *page,
+ gfp_t mask)
+{
+ u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 end = start + PAGE_CACHE_SIZE - 1;
+ int ret = 1;
+
+ if (test_range_bit(tree, start, end, EXTENT_IOBITS, 0))
+ ret = 0;
+ else {
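+ /* if the mask includes the GFP_NOFS bits, trim any extra flags */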
+ if ((mask & GFP_NOFS) == GFP_NOFS)
+ mask = GFP_NOFS;
+ clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
+ 1, 1, mask);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(try_release_extent_state);
+
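Callers treat the return value like any releasepage result: 1 means the
range had nothing locked or in flight (and the EXTENT_UPTODATE bits were
cleared), 0 means the page must stay. A hedged sketch of a ->releasepage
hook built on the helper, following the btree_releasepage hunk above; the
hook name and the page bookkeeping after the ClearPagePrivate call are
assumptions of this sketch:

	/* sketch of a ->releasepage hook using the new helper */
	static int example_releasepage(struct page *page, gfp_t gfp_flags)
	{
		struct extent_io_tree *tree;
		struct extent_map_tree *map;
		int ret;

		tree = &BTRFS_I(page->mapping->host)->io_tree;
		map = &BTRFS_I(page->mapping->host)->extent_tree;
		ret = try_release_extent_state(map, tree, page, gfp_flags);
		if (ret == 1) {
			/* safe: nothing locked or under IO on this page */
			ClearPagePrivate(page);
			set_page_private(page, 0);
			page_cache_release(page);
		}
		return ret;
	}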
/*
* a helper for releasepage. As long as there are no locked extents
* in the range corresponding to the page, both state records and extent
* map records are removed
*/
struct extent_map *em;
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 end = start + PAGE_CACHE_SIZE - 1;
- u64 orig_start = start;
- int ret = 1;
+
if ((mask & __GFP_WAIT) &&
page->mapping->host->i_size > 16 * 1024 * 1024) {
u64 len;
free_extent_map(em);
}
}
- if (test_range_bit(tree, orig_start, end, EXTENT_IOBITS, 0))
- ret = 0;
- else {
- if ((mask & GFP_NOFS) == GFP_NOFS)
- mask = GFP_NOFS;
- clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
- 1, 1, mask);
- }
- return ret;
+ return try_release_extent_state(map, tree, page, mask);
}
EXPORT_SYMBOL(try_release_extent_mapping);
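After this hunk, try_release_extent_mapping keeps its extent_map-dropping
walk behind the (mask & __GFP_WAIT) && i_size > 16MB gate, then delegates
the state-bit cleanup to the new helper so both release paths share one
exit. A compressed sketch of the resulting control flow, with the loop
body elided (not a verbatim copy of the function):

	int try_release_extent_mapping(struct extent_map_tree *map,
				       struct extent_io_tree *tree,
				       struct page *page, gfp_t mask)
	{
		if ((mask & __GFP_WAIT) &&
		    page->mapping->host->i_size > 16 * 1024 * 1024) {
			/* allowed to block and the file is large:
			 * walk the page's range and drop unpinned,
			 * unlocked extent_maps (elided) */
		}
		/* always finish by clearing state bits when safe */
		return try_release_extent_state(map, tree, page, mask);
	}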
int try_release_extent_mapping(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
+int try_release_extent_state(struct extent_map_tree *map,
+ struct extent_io_tree *tree, struct page *page,
+ gfp_t mask);
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,