int ret;
down_read(&bmap->b_sem);
- ret = (*bmap->b_ops->bop_lookup)(bmap, key, level, ptrp);
+ ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
if (ret < 0)
goto out;
if (bmap->b_pops->bpop_translate != NULL) {
- ret = (*bmap->b_pops->bpop_translate)(bmap, *ptrp, &ptr);
+ ret = bmap->b_pops->bpop_translate(bmap, *ptrp, &ptr);
if (ret < 0)
goto out;
*ptrp = ptr;
int ret, n;
if (bmap->b_ops->bop_check_insert != NULL) {
- ret = (*bmap->b_ops->bop_check_insert)(bmap, key);
+ ret = bmap->b_ops->bop_check_insert(bmap, key);
if (ret > 0) {
- n = (*bmap->b_ops->bop_gather_data)(
+ n = bmap->b_ops->bop_gather_data(
bmap, keys, ptrs, NILFS_BMAP_SMALL_HIGH + 1);
if (n < 0)
return n;
return ret;
}
- return (*bmap->b_ops->bop_insert)(bmap, key, ptr);
+ return bmap->b_ops->bop_insert(bmap, key, ptr);
}
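Every hunk in this patch makes the same purely syntactic change: for a function pointer f, the expressions (*f)(args) and f(args) are equivalent calls in C, so the explicit dereference adds nothing. A minimal standalone sketch (hypothetical names, not the nilfs2 types) showing that both spellings reach the same function:

    struct demo_ops {
        int (*lookup)(int key);             /* hypothetical operation table */
    };

    static int demo_lookup(int key)
    {
        return key + 1;
    }

    static const struct demo_ops ops = { .lookup = demo_lookup };

    static int call_both_ways(int key)
    {
        int a = (*ops.lookup)(key);         /* old spelling: explicit dereference */
        int b = ops.lookup(key);            /* new spelling: plain call */
        return a == b;                      /* always 1: both call demo_lookup() */
    }

Both forms compile to the same indirect call; only the source spelling differs.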
/**
int ret, n;
if (bmap->b_ops->bop_check_delete != NULL) {
- ret = (*bmap->b_ops->bop_check_delete)(bmap, key);
+ ret = bmap->b_ops->bop_check_delete(bmap, key);
if (ret > 0) {
- n = (*bmap->b_ops->bop_gather_data)(
+ n = bmap->b_ops->bop_gather_data(
bmap, keys, ptrs, NILFS_BMAP_LARGE_LOW + 1);
if (n < 0)
return n;
return ret;
}
- return (*bmap->b_ops->bop_delete)(bmap, key);
+ return bmap->b_ops->bop_delete(bmap, key);
}
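On both the insert and delete paths, a positive return from bop_check_insert/bop_check_delete signals that the mapping should switch representation, and bop_gather_data then copies the live key/pointer pairs into caller-supplied arrays so the replacement structure can be rebuilt from them. A toy version of such a gather step, with a hypothetical fixed-capacity map standing in for the nilfs2 structures:

    #define DEMO_CAP 4                      /* hypothetical capacity of the small form */

    struct demo_map {
        int n;                              /* number of live entries */
        unsigned long keys[DEMO_CAP];
        unsigned long ptrs[DEMO_CAP];
    };

    /* copy live entries into the caller's arrays; returns the count or -1 on overflow */
    static int demo_gather_data(const struct demo_map *m,
                                unsigned long *keys, unsigned long *ptrs, int max)
    {
        int i;

        if (m->n > max)
            return -1;
        for (i = 0; i < m->n; i++) {
            keys[i] = m->keys[i];
            ptrs[i] = m->ptrs[i];
        }
        return m->n;
    }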
int nilfs_bmap_last_key(struct nilfs_bmap *bmap, unsigned long *key)
int ret;
down_read(&bmap->b_sem);
- ret = (*bmap->b_ops->bop_last_key)(bmap, &lastkey);
+ ret = bmap->b_ops->bop_last_key(bmap, &lastkey);
if (!ret)
*key = lastkey;
up_read(&bmap->b_sem);
__u64 lastkey;
int ret;
- ret = (*bmap->b_ops->bop_last_key)(bmap, &lastkey);
+ ret = bmap->b_ops->bop_last_key(bmap, &lastkey);
if (ret < 0) {
if (ret == -ENOENT)
ret = 0;
ret = nilfs_bmap_do_delete(bmap, lastkey);
if (ret < 0)
return ret;
- ret = (*bmap->b_ops->bop_last_key)(bmap, &lastkey);
+ ret = bmap->b_ops->bop_last_key(bmap, &lastkey);
if (ret < 0) {
if (ret == -ENOENT)
ret = 0;
{
down_write(&bmap->b_sem);
if (bmap->b_ops->bop_clear != NULL)
- (*bmap->b_ops->bop_clear)(bmap);
+ bmap->b_ops->bop_clear(bmap);
up_write(&bmap->b_sem);
}
int ret;
down_write(&bmap->b_sem);
- ret = (*bmap->b_ops->bop_propagate)(bmap, bh);
+ ret = bmap->b_ops->bop_propagate(bmap, bh);
up_write(&bmap->b_sem);
return ret;
}
struct list_head *listp)
{
if (bmap->b_ops->bop_lookup_dirty_buffers != NULL)
- (*bmap->b_ops->bop_lookup_dirty_buffers)(bmap, listp);
+ bmap->b_ops->bop_lookup_dirty_buffers(bmap, listp);
}
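Hooks such as bpop_translate, bop_clear, the bpop_commit_* callbacks and bop_lookup_dirty_buffers are optional, so each caller tests the pointer before calling through it, while mandatory operations like bop_lookup and bop_insert are called unconditionally. The guard in isolation, with hypothetical names:

    struct demo_hooks {
        void (*optional_hook)(void *priv);  /* may legitimately be NULL */
    };

    static void demo_run_hook(const struct demo_hooks *h, void *priv)
    {
        if (h->optional_hook != NULL)
            h->optional_hook(priv);         /* skip silently when not implemented */
    }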
/**
int ret;
down_write(&bmap->b_sem);
- ret = (*bmap->b_ops->bop_assign)(bmap, bh, blocknr, binfo);
+ ret = bmap->b_ops->bop_assign(bmap, bh, blocknr, binfo);
up_write(&bmap->b_sem);
return ret;
}
return 0;
down_write(&bmap->b_sem);
- ret = (*bmap->b_ops->bop_mark)(bmap, key, level);
+ ret = bmap->b_ops->bop_mark(bmap, key, level);
up_write(&bmap->b_sem);
return ret;
}
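All of these wrappers serialize on bmap->b_sem: lookup and last-key take it shared with down_read()/up_read(), while operations that modify the mapping or its dirty state (propagate, assign, mark, clear) take it exclusive with down_write()/up_write(). A minimal sketch of that split, assuming only the standard kernel rw_semaphore API and a hypothetical structure:

    #include <linux/rwsem.h>

    struct demo_locked_map {
        struct rw_semaphore sem;
        int value;
    };

    static int demo_read(struct demo_locked_map *m)
    {
        int v;

        down_read(&m->sem);                 /* shared: readers may run concurrently */
        v = m->value;
        up_read(&m->sem);
        return v;
    }

    static void demo_write(struct demo_locked_map *m, int v)
    {
        down_write(&m->sem);                /* exclusive: blocks readers and writers */
        m->value = v;
        up_write(&m->sem);
    }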
{
int ret;
- ret = (*bmap->b_pops->bpop_prepare_end_ptr)(bmap, oldreq);
+ ret = bmap->b_pops->bpop_prepare_end_ptr(bmap, oldreq);
if (ret < 0)
return ret;
- ret = (*bmap->b_pops->bpop_prepare_alloc_ptr)(bmap, newreq);
+ ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, newreq);
if (ret < 0)
- (*bmap->b_pops->bpop_abort_end_ptr)(bmap, oldreq);
+ bmap->b_pops->bpop_abort_end_ptr(bmap, oldreq);
return ret;
}
union nilfs_bmap_ptr_req *oldreq,
union nilfs_bmap_ptr_req *newreq)
{
- (*bmap->b_pops->bpop_commit_end_ptr)(bmap, oldreq);
- (*bmap->b_pops->bpop_commit_alloc_ptr)(bmap, newreq);
+ bmap->b_pops->bpop_commit_end_ptr(bmap, oldreq);
+ bmap->b_pops->bpop_commit_alloc_ptr(bmap, newreq);
}
void nilfs_bmap_abort_update(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *oldreq,
union nilfs_bmap_ptr_req *newreq)
{
- (*bmap->b_pops->bpop_abort_end_ptr)(bmap, oldreq);
- (*bmap->b_pops->bpop_abort_alloc_ptr)(bmap, newreq);
+ bmap->b_pops->bpop_abort_end_ptr(bmap, oldreq);
+ bmap->b_pops->bpop_abort_alloc_ptr(bmap, newreq);
}
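nilfs_bmap_prepare_update, nilfs_bmap_commit_update and nilfs_bmap_abort_update follow a two-phase discipline: both pointer requests are prepared first (and the first is aborted if the second prepare fails), after which the caller either commits both or aborts both. The same shape as a self-contained toy, with hypothetical resources a and b:

    struct demo_update {
        int a_prepared;
        int b_prepared;
    };

    static int prepare_a(struct demo_update *u) { u->a_prepared = 1; return 0; }
    static int prepare_b(struct demo_update *u) { u->b_prepared = 1; return 0; }
    static void abort_a(struct demo_update *u)  { u->a_prepared = 0; }
    static void abort_b(struct demo_update *u)  { u->b_prepared = 0; }
    static void commit_a(struct demo_update *u) { u->a_prepared = 0; }
    static void commit_b(struct demo_update *u) { u->b_prepared = 0; }

    static int demo_prepare_update(struct demo_update *u)
    {
        int ret;

        ret = prepare_a(u);
        if (ret < 0)
            return ret;                     /* nothing to roll back yet */
        ret = prepare_b(u);
        if (ret < 0)
            abort_a(u);                     /* undo the part already prepared */
        return ret;
    }

    static void demo_commit_update(struct demo_update *u)
    {
        commit_a(u);
        commit_b(u);
    }

    static void demo_abort_update(struct demo_update *u)
    {
        abort_a(u);
        abort_b(u);
    }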
static int nilfs_bmap_translate_v(const struct nilfs_bmap *bmap, __u64 ptr,
/* allocate a new ptr for data block */
if (btree->bt_ops->btop_find_target != NULL)
path[level].bp_newreq.bpr_ptr =
- (*btree->bt_ops->btop_find_target)(btree, path, key);
+ btree->bt_ops->btop_find_target(btree, path, key);
- ret = (*btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr)(
+ ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
&btree->bt_bmap, &path[level].bp_newreq);
if (ret < 0)
goto err_out_data;
/* split */
path[level].bp_newreq.bpr_ptr =
path[level - 1].bp_newreq.bpr_ptr + 1;
- ret = (*btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr)(
+ ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
&btree->bt_bmap, &path[level].bp_newreq);
if (ret < 0)
goto err_out_child_node;
/* grow */
path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
- ret = (*btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr)(
+ ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
&btree->bt_bmap, &path[level].bp_newreq);
if (ret < 0)
goto err_out_child_node;
/* error */
err_out_curr_node:
- (*btree->bt_bmap.b_pops->bpop_abort_alloc_ptr)(&btree->bt_bmap,
- &path[level].bp_newreq);
+ btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap,
+ &path[level].bp_newreq);
err_out_child_node:
for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
nilfs_bmap_delete_block(&btree->bt_bmap, path[level].bp_sib_bh);
- (*btree->bt_bmap.b_pops->bpop_abort_alloc_ptr)(
+ btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(
&btree->bt_bmap, &path[level].bp_newreq);
}
- (*btree->bt_bmap.b_pops->bpop_abort_alloc_ptr)(&btree->bt_bmap,
+ btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap,
&path[level].bp_newreq);
err_out_data:
*levelp = level;
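The err_out_curr_node / err_out_child_node / err_out_data labels unwind, in reverse order, everything the prepare pass had already reserved before the failing level. The same cascading-goto cleanup idiom in miniature, with hypothetical resources:

    static int acquire_a(void) { return 0; }
    static int acquire_b(void) { return 0; }
    static int acquire_c(void) { return 0; }
    static void release_a(void) { }
    static void release_b(void) { }

    static int demo_setup(void)
    {
        int ret;

        ret = acquire_a();
        if (ret < 0)
            goto err_out_a;                 /* nothing acquired yet */
        ret = acquire_b();
        if (ret < 0)
            goto err_out_b;                 /* undo a */
        ret = acquire_c();
        if (ret < 0)
            goto err_out_c;                 /* undo b, then fall through to undo a */
        return 0;

    err_out_c:
        release_b();
    err_out_b:
        release_a();
    err_out_a:
        return ret;
    }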
set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
if (btree->bt_ops->btop_set_target != NULL)
- (*btree->bt_ops->btop_set_target)(btree, key, ptr);
+ btree->bt_ops->btop_set_target(btree, key, ptr);
for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
if (btree->bt_bmap.b_pops->bpop_commit_alloc_ptr != NULL) {
- (*btree->bt_bmap.b_pops->bpop_commit_alloc_ptr)(
+ btree->bt_bmap.b_pops->bpop_commit_alloc_ptr(
&btree->bt_bmap, &path[level - 1].bp_newreq);
}
- (*path[level].bp_op)(btree, path, level, &key, &ptr);
+ path[level].bp_op(btree, path, level, &key, &ptr);
}
if (!nilfs_bmap_dirty(&btree->bt_bmap))
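During the prepare pass each level of the path records in bp_op the routine that will apply its share of the insert (insert into a node, split, grow the tree); the commit loop above then simply replays those recorded operations level by level after committing the pointer allocations. A toy record-then-replay loop with a hypothetical step type:

    typedef void (*demo_step_fn)(int level);

    static void demo_insert_step(int level) { (void)level; /* e.g. insert into a node */ }
    static void demo_split_step(int level)  { (void)level; /* e.g. split a full node */ }

    static void demo_commit_steps(demo_step_fn *steps, int maxlevel)
    {
        int level;

        /* replay, in order, the operation each level chose during prepare */
        for (level = 1; level <= maxlevel; level++)
            steps[level](level);
    }

    static void demo_example(void)
    {
        demo_step_fn steps[3] = { 0, demo_insert_step, demo_split_step };

        demo_commit_steps(steps, 2);        /* runs insert at level 1, split at level 2 */
    }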
nilfs_btree_node_get_ptr(btree, node,
path[level].bp_index);
if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
- ret = (*btree->bt_bmap.b_pops->bpop_prepare_end_ptr)(
+ ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr(
&btree->bt_bmap, &path[level].bp_oldreq);
if (ret < 0)
goto err_out_child_node;
path[level].bp_oldreq.bpr_ptr =
nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);
if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
- ret = (*btree->bt_bmap.b_pops->bpop_prepare_end_ptr)(
+ ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr(
&btree->bt_bmap, &path[level].bp_oldreq);
if (ret < 0)
goto err_out_child_node;
/* error */
err_out_curr_node:
if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL)
- (*btree->bt_bmap.b_pops->bpop_abort_end_ptr)(
+ btree->bt_bmap.b_pops->bpop_abort_end_ptr(
&btree->bt_bmap, &path[level].bp_oldreq);
err_out_child_node:
for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL)
- (*btree->bt_bmap.b_pops->bpop_abort_end_ptr)(
+ btree->bt_bmap.b_pops->bpop_abort_end_ptr(
&btree->bt_bmap, &path[level].bp_oldreq);
}
*levelp = level;
for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
if (btree->bt_bmap.b_pops->bpop_commit_end_ptr != NULL)
- (*btree->bt_bmap.b_pops->bpop_commit_end_ptr)(
+ btree->bt_bmap.b_pops->bpop_commit_end_ptr(
&btree->bt_bmap, &path[level].bp_oldreq);
- (*path[level].bp_op)(btree, path, level, NULL, NULL);
+ path[level].bp_op(btree, path, level, NULL, NULL);
}
if (!nilfs_bmap_dirty(&btree->bt_bmap))
/* cannot find near ptr */
if (btree->bt_ops->btop_find_target != NULL)
dreq->bpr_ptr
- = (*btree->bt_ops->btop_find_target)(btree, NULL, key);
- ret = (*bmap->b_pops->bpop_prepare_alloc_ptr)(bmap, dreq);
+ = btree->bt_ops->btop_find_target(btree, NULL, key);
+ ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, dreq);
if (ret < 0)
return ret;
stats->bs_nblocks++;
if (nreq != NULL) {
nreq->bpr_ptr = dreq->bpr_ptr + 1;
- ret = (*bmap->b_pops->bpop_prepare_alloc_ptr)(bmap, nreq);
+ ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, nreq);
if (ret < 0)
goto err_out_dreq;
/* error */
err_out_nreq:
- (*bmap->b_pops->bpop_abort_alloc_ptr)(bmap, nreq);
+ bmap->b_pops->bpop_abort_alloc_ptr(bmap, nreq);
err_out_dreq:
- (*bmap->b_pops->bpop_abort_alloc_ptr)(bmap, dreq);
+ bmap->b_pops->bpop_abort_alloc_ptr(bmap, dreq);
stats->bs_nblocks = 0;
return ret;
/* free resources */
if (bmap->b_ops->bop_clear != NULL)
- (*bmap->b_ops->bop_clear)(bmap);
+ bmap->b_ops->bop_clear(bmap);
/* ptr must be a pointer to a buffer head. */
set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
nilfs_btree_init(bmap, low, high);
if (nreq != NULL) {
if (bmap->b_pops->bpop_commit_alloc_ptr != NULL) {
- (*bmap->b_pops->bpop_commit_alloc_ptr)(bmap, dreq);
- (*bmap->b_pops->bpop_commit_alloc_ptr)(bmap, nreq);
+ bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq);
+ bmap->b_pops->bpop_commit_alloc_ptr(bmap, nreq);
}
/* create child node at level 1 */
2, 1, &keys[0], &tmpptr);
} else {
if (bmap->b_pops->bpop_commit_alloc_ptr != NULL)
- (*bmap->b_pops->bpop_commit_alloc_ptr)(bmap, dreq);
+ bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq);
/* create root node at level 1 */
node = nilfs_btree_get_root(btree);
}
if (btree->bt_ops->btop_set_target != NULL)
- (*btree->bt_ops->btop_set_target)(btree, key, dreq->bpr_ptr);
+ btree->bt_ops->btop_set_target(btree, key, dreq->bpr_ptr);
}
/**
goto out;
}
- ret = (*btree->bt_ops->btop_propagate)(btree, path, level, bh);
+ ret = btree->bt_ops->btop_propagate(btree, path, level, bh);
out:
nilfs_btree_clear_path(btree, path);
ptr = nilfs_btree_node_get_ptr(btree, parent,
path[level + 1].bp_index);
req.bpr_ptr = ptr;
- ret = (*btree->bt_bmap.b_pops->bpop_prepare_start_ptr)(&btree->bt_bmap,
+ ret = btree->bt_bmap.b_pops->bpop_prepare_start_ptr(&btree->bt_bmap,
&req);
if (ret < 0)
return ret;
- (*btree->bt_bmap.b_pops->bpop_commit_start_ptr)(&btree->bt_bmap,
+ btree->bt_bmap.b_pops->bpop_commit_start_ptr(&btree->bt_bmap,
&req, blocknr);
key = nilfs_btree_node_get_key(btree, parent,
goto out;
}
- ret = (*btree->bt_ops->btop_assign)(btree, path, level, bh,
+ ret = btree->bt_ops->btop_assign(btree, path, level, bh,
blocknr, binfo);
out:
int ret;
if (direct->d_ops->dop_find_target != NULL)
- req->bpr_ptr = (*direct->d_ops->dop_find_target)(direct, key);
- ret = (*direct->d_bmap.b_pops->bpop_prepare_alloc_ptr)(&direct->d_bmap,
+ req->bpr_ptr = direct->d_ops->dop_find_target(direct, key);
+ ret = direct->d_bmap.b_pops->bpop_prepare_alloc_ptr(&direct->d_bmap,
req);
if (ret < 0)
return ret;
set_buffer_nilfs_volatile(bh);
if (direct->d_bmap.b_pops->bpop_commit_alloc_ptr != NULL)
- (*direct->d_bmap.b_pops->bpop_commit_alloc_ptr)(
+ direct->d_bmap.b_pops->bpop_commit_alloc_ptr(
&direct->d_bmap, req);
nilfs_direct_set_ptr(direct, key, req->bpr_ptr);
nilfs_bmap_set_dirty(&direct->d_bmap);
if (direct->d_ops->dop_set_target != NULL)
- (*direct->d_ops->dop_set_target)(direct, key, req->bpr_ptr);
+ direct->d_ops->dop_set_target(direct, key, req->bpr_ptr);
}
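The dop_find_target / dop_set_target pair (and btop_find_target / btop_set_target on the btree side) act as an allocation hint: find_target is consulted before bpop_prepare_alloc_ptr to pick a desired pointer, and set_target records the pointer that was finally committed. A rough sketch of such a hint cache, under the assumption that the hint is simply "near the last committed pointer":

    struct demo_hint {
        unsigned long last_ptr;             /* 0 means "no hint yet" */
    };

    /* suggest a pointer close to the previous allocation for the given key */
    static unsigned long demo_find_target(const struct demo_hint *h, unsigned long key)
    {
        (void)key;
        return h->last_ptr ? h->last_ptr + 1 : 0;
    }

    /* remember the pointer that the allocation actually committed to */
    static void demo_set_target(struct demo_hint *h, unsigned long key, unsigned long ptr)
    {
        (void)key;
        h->last_ptr = ptr;
    }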
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
if (direct->d_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
req->bpr_ptr = nilfs_direct_get_ptr(direct, key);
- ret = (*direct->d_bmap.b_pops->bpop_prepare_end_ptr)(
+ ret = direct->d_bmap.b_pops->bpop_prepare_end_ptr(
&direct->d_bmap, req);
if (ret < 0)
return ret;
__u64 key)
{
if (direct->d_bmap.b_pops->bpop_commit_end_ptr != NULL)
- (*direct->d_bmap.b_pops->bpop_commit_end_ptr)(
+ direct->d_bmap.b_pops->bpop_commit_end_ptr(
&direct->d_bmap, req);
nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR);
}
/* no need to allocate any resource for conversion */
/* delete */
- ret = (*bmap->b_ops->bop_delete)(bmap, key);
+ ret = bmap->b_ops->bop_delete(bmap, key);
if (ret < 0)
return ret;
/* free resources */
if (bmap->b_ops->bop_clear != NULL)
- (*bmap->b_ops->bop_clear)(bmap);
+ bmap->b_ops->bop_clear(bmap);
/* convert */
direct = (struct nilfs_direct *)bmap;
direct = (struct nilfs_direct *)bmap;
return (direct->d_ops->dop_propagate != NULL) ?
- (*direct->d_ops->dop_propagate)(direct, bh) :
+ direct->d_ops->dop_propagate(direct, bh) :
0;
}
int ret;
req.bpr_ptr = ptr;
- ret = (*direct->d_bmap.b_pops->bpop_prepare_start_ptr)(
+ ret = direct->d_bmap.b_pops->bpop_prepare_start_ptr(
&direct->d_bmap, &req);
if (ret < 0)
return ret;
- (*direct->d_bmap.b_pops->bpop_commit_start_ptr)(&direct->d_bmap,
- &req, blocknr);
+ direct->d_bmap.b_pops->bpop_commit_start_ptr(&direct->d_bmap,
+ &req, blocknr);
binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);
ptr = nilfs_direct_get_ptr(direct, key);
BUG_ON(ptr == NILFS_BMAP_INVALID_PTR);
- return (*direct->d_ops->dop_assign)(direct, key, ptr, bh,
- blocknr, binfo);
+ return direct->d_ops->dop_assign(direct, key, ptr, bh,
+ blocknr, binfo);
}
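nilfs_direct_ops below is the operation table that plugs the direct mapping into the generic bmap layer; the btree side has an equivalent table, and switching representation amounts to rebuilding the data and swapping b_ops. A minimal sketch of wiring such a table with designated initializers, using hypothetical operations; members left out of the initializer are zeroed, which is how optional hooks end up NULL:

    struct demo_bmap;

    struct demo_bmap_operations {
        int (*bop_lookup)(const struct demo_bmap *m, unsigned long key, unsigned long *ptr);
        int (*bop_insert)(struct demo_bmap *m, unsigned long key, unsigned long ptr);
        void (*bop_clear)(struct demo_bmap *m);     /* optional; left NULL here */
    };

    static int demo_direct_lookup(const struct demo_bmap *m, unsigned long key, unsigned long *ptr)
    {
        (void)m; (void)key;
        *ptr = 0;
        return 0;
    }

    static int demo_direct_insert(struct demo_bmap *m, unsigned long key, unsigned long ptr)
    {
        (void)m; (void)key; (void)ptr;
        return 0;
    }

    static const struct demo_bmap_operations demo_direct_ops = {
        .bop_lookup = demo_direct_lookup,
        .bop_insert = demo_direct_insert,
        /* .bop_clear omitted: zero-initialized, i.e. "not implemented" */
    };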
static const struct nilfs_bmap_operations nilfs_direct_ops = {