--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ ... @@
	return NULL;
}
-struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
- unsigned long flags)
-{
- return dev->mt->get_blk_unlocked(dev, lun, flags);
-}
-EXPORT_SYMBOL(nvm_get_blk_unlocked);
-
-/* Assumes that all valid pages have already been moved on release to bm */
-void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
-{
- return dev->mt->put_blk_unlocked(dev, blk);
-}
-EXPORT_SYMBOL(nvm_put_blk_unlocked);
-
struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
unsigned long flags)
{
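
With the _unlocked variants removed from core.c, nvm_get_blk() and nvm_put_blk() are the only exported block allocation entry points, and LUN locking moves inside the media manager. A minimal caller sketch under the new contract (hypothetical target code, not part of this patch; assumes a live struct nvm_dev and struct nvm_lun):

#include <linux/lightnvm.h>

/* Hypothetical helper: allocate one block for normal (non-GC) I/O and
 * release it again.  The caller must NOT hold the LUN lock; after this
 * patch it is taken inside the media manager's get_blk/put_blk.
 */
static int demo_cycle_blk(struct nvm_dev *dev, struct nvm_lun *lun)
{
	struct nvm_block *blk;

	blk = nvm_get_blk(dev, lun, 0);	/* 0: plain I/O, not NVM_IOTYPE_GC */
	if (!blk)
		return -ENOSPC;

	/* ... program pages in the block via the device ops ... */

	nvm_put_blk(dev, blk);
	return 0;
}
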
--- a/drivers/lightnvm/gen.c
+++ b/drivers/lightnvm/gen.c
@@ ... @@
	module_put(THIS_MODULE);
}
-static struct nvm_block *gen_get_blk_unlocked(struct nvm_dev *dev,
+static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
struct nvm_lun *vlun, unsigned long flags)
{
struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
struct nvm_block *blk = NULL;
int is_gc = flags & NVM_IOTYPE_GC;
- assert_spin_locked(&vlun->lock);
-
+ spin_lock(&vlun->lock);
if (list_empty(&lun->free_list)) {
pr_err_ratelimited("gen: lun %u have no free pages available",
lun->vlun.id);
@@ ... @@
	list_move_tail(&blk->list, &lun->used_list);
blk->state = NVM_BLK_ST_TGT;
lun->vlun.nr_free_blocks--;
-
out:
- return blk;
-}
-
-static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
- struct nvm_lun *vlun, unsigned long flags)
-{
- struct nvm_block *blk;
-
- spin_lock(&vlun->lock);
- blk = gen_get_blk_unlocked(dev, vlun, flags);
spin_unlock(&vlun->lock);
return blk;
}
-static void gen_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
+static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
struct nvm_lun *vlun = blk->lun;
struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
- assert_spin_locked(&vlun->lock);
-
+ spin_lock(&vlun->lock);
if (blk->state & NVM_BLK_ST_TGT) {
list_move_tail(&blk->list, &lun->free_list);
lun->vlun.nr_free_blocks++;
@@ ... @@
						blk->id, blk->state);
list_move_tail(&blk->list, &lun->bb_list);
}
-}
-
-static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
-{
- struct nvm_lun *vlun = blk->lun;
-
- spin_lock(&vlun->lock);
- gen_put_blk_unlocked(dev, blk);
spin_unlock(&vlun->lock);
}
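
gen_get_blk() and gen_put_blk() now acquire vlun->lock themselves, so a caller that still wraps them in the LUN spinlock would deadlock on the second acquisition; that is exactly why the rrpc hunks below drop their caller-side locking. An illustrative comment, not from this patch:

/* No longer valid once this patch is applied:
 *
 *	spin_lock(&lun->lock);
 *	blk = nvm_get_blk(dev, lun, flags);	// re-takes lun->lock: deadlock
 *	spin_unlock(&lun->lock);
 *
 * Correct usage is nvm_get_blk()/nvm_put_blk() with no LUN lock held.
 */
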
@@ ... @@
	.create_tgt = gen_create_tgt,
.remove_tgt = gen_remove_tgt,
- .get_blk_unlocked = gen_get_blk_unlocked,
- .put_blk_unlocked = gen_put_blk_unlocked,
-
.get_blk = gen_get_blk,
.put_blk = gen_put_blk,
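
For reference, gen_put_blk() routes a released block by its state bits: NVM_BLK_ST_TGT blocks return to the free list, while bad blocks are parked on lun->bb_list. A hypothetical error path that relies on this (demo_retire_blk is illustrative only):

/* Sketch: a target that detects a grown-bad block flags it before release,
 * so the media manager moves it to the per-LUN bad-block list instead of
 * recycling it.  Assumes NVM_BLK_ST_BAD routing as in gen_put_blk() above.
 */
static void demo_retire_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	blk->state = NVM_BLK_ST_BAD;
	nvm_put_blk(dev, blk);
}
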
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ ... @@
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
unsigned long flags)
{
- struct nvm_lun *lun = rlun->parent;
struct nvm_block *blk;
struct rrpc_block *rblk;
- spin_lock(&lun->lock);
- blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
+ blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
if (!blk) {
pr_err("nvm: rrpc: cannot get new block from media manager\n");
- spin_unlock(&lun->lock);
return NULL;
}
rblk = rrpc_get_rblk(rlun, blk->id);
- spin_unlock(&lun->lock);
-
blk->priv = rblk;
bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
rblk->next_page = 0;
@@ ... @@
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct rrpc_lun *rlun = rblk->rlun;
- struct nvm_lun *lun = rlun->parent;
-
- spin_lock(&lun->lock);
- nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
- spin_unlock(&lun->lock);
+ nvm_put_blk(rrpc->dev, rblk->parent);
}
static void rrpc_put_blks(struct rrpc *rrpc)
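
The rrpc conversion is mechanical: the lun->lock critical sections disappear and the wrappers reduce to plain nvm_get_blk()/nvm_put_blk() calls. The flags argument still distinguishes user from GC allocations; a hedged sketch (demo_get_gc_blk is hypothetical):

/* Illustrative only: allocate a block on behalf of garbage collection.
 * NVM_IOTYPE_GC lets the media manager satisfy the request even when the
 * free-block count is at or below the reserve that plain user I/O may
 * not consume.
 */
static struct rrpc_block *demo_get_gc_blk(struct rrpc *rrpc,
					  struct rrpc_lun *rlun)
{
	return rrpc_get_blk(rrpc, rlun, NVM_IOTYPE_GC);
}
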
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ ... @@
	nvmm_remove_tgt_fn *remove_tgt;
/* Block administration callbacks */
- nvmm_get_blk_fn *get_blk_unlocked;
- nvmm_put_blk_fn *put_blk_unlocked;
nvmm_get_blk_fn *get_blk;
nvmm_put_blk_fn *put_blk;
nvmm_open_blk_fn *open_blk;
@@ ... @@
extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);
-extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *,
- struct nvm_lun *, unsigned long);
-extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *);
-
extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
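
With the *_unlocked callbacks gone from struct nvmm_type, a media manager wires up only the locked pair. A skeleton registration sketch (all demo_* names hypothetical; mandatory callbacks unrelated to this patch omitted):

#include <linux/lightnvm.h>

static struct nvm_block *demo_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags);
static void demo_put_blk(struct nvm_dev *dev, struct nvm_block *blk);

/* After this patch there are no get_blk_unlocked/put_blk_unlocked fields
 * left to fill in; get_blk/put_blk must take the LUN lock themselves.
 */
static struct nvmm_type demo_mgr = {
	.name		= "demo",
	.get_blk	= demo_get_blk,
	.put_blk	= demo_put_blk,
	/* .create_tgt, .remove_tgt, ... as before */
};

static int __init demo_init(void)
{
	return nvm_register_mgr(&demo_mgr);
}

static void __exit demo_exit(void)
{
	nvm_unregister_mgr(&demo_mgr);
}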