* @free_reg_res: Free registration resources
* @reg_rdma_mem: Register memory buffers
* @unreg_rdma_mem: Un-register memory buffers
+ * @reg_desc_get: Get a registration descriptor from the pool
+ * @reg_desc_put: Put a registration descriptor back to the pool
*/
struct iser_reg_ops {
int (*alloc_reg_res)(struct ib_conn *ib_conn,
enum iser_data_dir cmd_dir);
void (*unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
+ struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
+ void (*reg_desc_put)(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc);
};
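For reference, a minimal sketch of how callers are expected to use the two new ops once the tables below are wired up (this mirrors the call sites changed later in this patch; it is illustrative, not part of the diff itself):

	struct iser_device *device = ib_conn->device;
	struct iser_fr_desc *desc;

	/* take a registration descriptor through the per-device method table */
	desc = device->reg_ops->reg_desc_get(ib_conn);
	/* ... perform the memory registration using desc ... */
	/* hand the descriptor back once the task is done with it */
	device->reg_ops->reg_desc_put(ib_conn, desc);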
/**
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector);
struct iser_fr_desc *
-iser_reg_desc_get(struct ib_conn *ib_conn);
+iser_reg_desc_get_fr(struct ib_conn *ib_conn);
void
-iser_reg_desc_put(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
+iser_reg_desc_put_fr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc);
+struct iser_fr_desc *
+iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
+void
+iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc);
#endif
.free_reg_res = iser_free_fastreg_pool,
.reg_rdma_mem = iser_reg_rdma_mem_fastreg,
.unreg_rdma_mem = iser_unreg_mem_fastreg,
+ .reg_desc_get = iser_reg_desc_get_fr,
+ .reg_desc_put = iser_reg_desc_put_fr,
};
static struct iser_reg_ops fmr_ops = {
.free_reg_res = iser_free_fmr_pool,
.reg_rdma_mem = iser_reg_rdma_mem_fmr,
.unreg_rdma_mem = iser_unreg_mem_fmr,
+ .reg_desc_get = iser_reg_desc_get_fmr,
+ .reg_desc_put = iser_reg_desc_put_fmr,
};
int iser_assign_reg_ops(struct iser_device *device)
}
struct iser_fr_desc *
-iser_reg_desc_get(struct ib_conn *ib_conn)
+iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_fr_desc *desc;
}
void
-iser_reg_desc_put(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc)
+iser_reg_desc_put_fr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
unsigned long flags;
spin_unlock_irqrestore(&fr_pool->lock, flags);
}
+struct iser_fr_desc *
+iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
+{
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+
+ /* FMR descriptors are not removed from the pool list,
+ * so simply return the first one; no locking needed.
+ */
+ return list_first_entry(&fr_pool->list,
+ struct iser_fr_desc, list);
+}
+
+void
+iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc)
+{
+ /* Nothing to do: the descriptor was never taken off the pool list */
+}
+
/**
* iser_start_rdma_unaligned_sg
*/
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
+ struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
if (!reg->mem_h)
return;
- iser_reg_desc_put(&iser_task->iser_conn->ib_conn,
- reg->mem_h);
+ device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
+ reg->mem_h);
reg->mem_h = NULL;
}
enum iser_data_dir cmd_dir)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
} else { /* use FMR for multiple dma entries */
struct iser_fr_desc *desc;
- desc = list_first_entry(&fr_pool->list,
- struct iser_fr_desc, list);
+ desc = device->reg_ops->reg_desc_get(ib_conn);
err = iser_fast_reg_fmr(iser_task, mem, &desc->rsc, mem_reg);
if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
if (mem->dma_nents != 1 ||
scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
- desc = iser_reg_desc_get(ib_conn);
+ desc = device->reg_ops->reg_desc_get(ib_conn);
mem_reg->mem_h = desc;
}
return 0;
err_reg:
if (desc)
- iser_reg_desc_put(ib_conn, desc);
+ device->reg_ops->reg_desc_put(ib_conn, desc);
return err;
}