{
struct extent_status es;
- es.es_lblk = lblk_start;
- (void)ext4_es_find_extent(inode, &es);
+ ext4_es_find_delayed_extent(inode, lblk_start, &es);
if (es.es_len == 0)
return 0; /* there is no delay extent in this tree */
else if (es.es_lblk <= lblk_start &&
struct ext4_ext_cache *newex)
{
struct extent_status es;
- ext4_lblk_t next_del;
+ ext4_lblk_t block, next_del;
- es.es_lblk = newex->ec_block;
- next_del = ext4_es_find_extent(inode, &es);
+ ext4_es_find_delayed_extent(inode, newex->ec_block, &es);
if (newex->ec_start == 0) {
/*
newex->ec_len = es.es_lblk + es.es_len - newex->ec_block;
}
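+	/*
+	 * Find the first delayed extent at or after the block just past
+	 * this extent; return its start block, or EXT_MAX_BLOCKS if
+	 * there is none.
+	 */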
+ block = newex->ec_block + newex->ec_len;
+ ext4_es_find_delayed_extent(inode, block, &es);
+ if (es.es_len == 0)
+ next_del = EXT_MAX_BLOCKS;
+ else
+ next_del = es.es_lblk;
+
return next_del;
}
/* fiemap flags we can handle specified here */
}
/*
- * ext4_es_find_extent: find the 1st delayed extent covering @es->lblk
- * if it exists, otherwise, the next extent after @es->lblk.
+ * ext4_es_find_delayed_extent: find the 1st delayed extent covering @lblk
+ * if it exists, otherwise, the next extent after @lblk.
*
* @inode: the inode which owns delayed extents
+ * @lblk: the offset where we start to search
* @es: delayed extent that we found
- *
- * Returns the first block of the next extent after es, otherwise
- * EXT_MAX_BLOCKS if no extent is found.
- * Delayed extent is returned via @es.
*/
-ext4_lblk_t ext4_es_find_extent(struct inode *inode, struct extent_status *es)
+void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+ struct extent_status *es)
{
struct ext4_es_tree *tree = NULL;
struct extent_status *es1 = NULL;
struct rb_node *node;
- ext4_lblk_t ret = EXT_MAX_BLOCKS;
- trace_ext4_es_find_extent_enter(inode, es->es_lblk);
+ BUG_ON(es == NULL);
+ trace_ext4_es_find_delayed_extent_enter(inode, lblk);
read_lock(&EXT4_I(inode)->i_es_lock);
tree = &EXT4_I(inode)->i_es_tree;
/* find extent in cache firstly */
- es->es_len = es->es_pblk = 0;
+ es->es_lblk = es->es_len = es->es_pblk = 0;
if (tree->cache_es) {
es1 = tree->cache_es;
- if (in_range(es->es_lblk, es1->es_lblk, es1->es_len)) {
+ if (in_range(lblk, es1->es_lblk, es1->es_len)) {
es_debug("%u cached by [%u/%u) %llu %llx\n",
- es->es_lblk, es1->es_lblk, es1->es_len,
+ lblk, es1->es_lblk, es1->es_len,
ext4_es_pblock(es1), ext4_es_status(es1));
goto out;
}
}
- es1 = __es_tree_search(&tree->root, es->es_lblk);
+ es1 = __es_tree_search(&tree->root, lblk);
out:
- if (es1) {
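+	/*
+	 * If the extent found is not delayed, scan forward in the tree
+	 * for the first delayed extent, if any.
+	 */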
+ if (es1 && !ext4_es_is_delayed(es1)) {
+ while ((node = rb_next(&es1->rb_node)) != NULL) {
+ es1 = rb_entry(node, struct extent_status, rb_node);
+ if (ext4_es_is_delayed(es1))
+ break;
+ }
+ }
+
+ if (es1 && ext4_es_is_delayed(es1)) {
tree->cache_es = es1;
es->es_lblk = es1->es_lblk;
es->es_len = es1->es_len;
es->es_pblk = es1->es_pblk;
- node = rb_next(&es1->rb_node);
- if (node) {
- es1 = rb_entry(node, struct extent_status, rb_node);
- ret = es1->es_lblk;
- }
}
read_unlock(&EXT4_I(inode)->i_es_lock);
- trace_ext4_es_find_extent_exit(inode, es, ret);
- return ret;
+ trace_ext4_es_find_delayed_extent_exit(inode, es);
}
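For illustration only (not part of this patch): a minimal sketch of a caller of the new interface, assuming the usual ext4 headers; the helper name below is hypothetical. The start block is now passed explicitly, the result comes back only through @es, and es.es_len == 0 means there is no delayed extent at or after @lblk.

/* Hypothetical example, not part of this patch. */
static ext4_lblk_t sample_next_delayed_block(struct inode *inode,
					     ext4_lblk_t lblk)
{
	struct extent_status es;

	ext4_es_find_delayed_extent(inode, lblk, &es);
	if (es.es_len == 0)
		return EXT_MAX_BLOCKS;	/* nothing delayed at or after lblk */
	if (in_range(lblk, es.es_lblk, es.es_len))
		return lblk;		/* lblk itself is delayed */
	return es.es_lblk;		/* first delayed block after lblk */
}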
static struct extent_status *
unsigned long long status);
extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len);
-extern ext4_lblk_t ext4_es_find_extent(struct inode *inode,
- struct extent_status *es);
+extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+ struct extent_status *es);
static inline int ext4_es_is_written(struct extent_status *es)
{
* If there is a delay extent at this offset,
* it will be as a data.
*/
- es.es_lblk = last;
- (void)ext4_es_find_extent(inode, &es);
+ ext4_es_find_delayed_extent(inode, last, &es);
if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
if (last != start)
dataoff = last << blkbits;
* If there is a delay extent at this offset,
* we will skip this extent.
*/
- es.es_lblk = last;
- (void)ext4_es_find_extent(inode, &es);
+ ext4_es_find_delayed_extent(inode, last, &es);
if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
last = es.es_lblk + es.es_len;
holeoff = last << blkbits;
__entry->lblk, __entry->len)
);
-TRACE_EVENT(ext4_es_find_extent_enter,
+TRACE_EVENT(ext4_es_find_delayed_extent_enter,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
TP_ARGS(inode, lblk),
(unsigned long) __entry->ino, __entry->lblk)
);
-TRACE_EVENT(ext4_es_find_extent_exit,
- TP_PROTO(struct inode *inode, struct extent_status *es,
- ext4_lblk_t ret),
+TRACE_EVENT(ext4_es_find_delayed_extent_exit,
+ TP_PROTO(struct inode *inode, struct extent_status *es),
- TP_ARGS(inode, es, ret),
+ TP_ARGS(inode, es),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ext4_lblk_t, len )
__field( ext4_fsblk_t, pblk )
__field( unsigned long long, status )
- __field( ext4_lblk_t, ret )
),
TP_fast_assign(
__entry->len = es->es_len;
__entry->pblk = ext4_es_pblock(es);
__entry->status = ext4_es_status(es);
- __entry->ret = ret;
),
- TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx ret %u",
+ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->lblk, __entry->len,
- __entry->pblk, __entry->status, __entry->ret)
+ __entry->pblk, __entry->status)
);
#endif /* _TRACE_EXT4_H */