static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev,
int npages)
{
struct rds_ib_mr_pool *pool;
struct rds_ib_mr *ibmr = NULL;
+ struct rds_ib_fmr *fmr;
int err = 0, iter = 0;
if (npages <= RDS_MR_8K_MSG_SIZE)
pool = rds_ibdev->mr_8k_pool;
else
pool = rds_ibdev->mr_1m_pool;
- ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
+ fmr = &ibmr->u.fmr;
+ fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
(IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_ATOMIC),
&pool->fmr_attr);
- if (IS_ERR(ibmr->fmr)) {
- err = PTR_ERR(ibmr->fmr);
- ibmr->fmr = NULL;
+ if (IS_ERR(fmr->fmr)) {
+ err = PTR_ERR(fmr->fmr);
+ fmr->fmr = NULL;
pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
goto out_no_cigar;
}
out_no_cigar:
if (ibmr) {
- if (ibmr->fmr)
- ib_dealloc_fmr(ibmr->fmr);
+ if (fmr->fmr)
+ ib_dealloc_fmr(fmr->fmr);
kfree(ibmr);
}
atomic_dec(&pool->item_count);
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
struct scatterlist *sg, unsigned int nents)
{
struct ib_device *dev = rds_ibdev->dev;
+ struct rds_ib_fmr *fmr = &ibmr->u.fmr;
struct scatterlist *scat = sg;
u64 io_addr = 0;
u64 *dma_pages;
(dma_addr & PAGE_MASK) + j;
}
- ret = ib_map_phys_fmr(ibmr->fmr, dma_pages, page_cnt, io_addr);
+ ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
if (ret)
goto out;
#define RDS_MR_8K_SCALE (256 / (RDS_MR_8K_MSG_SIZE + 1))
#define RDS_MR_8K_POOL_SIZE (RDS_MR_8K_SCALE * (8192 / 2))
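+/* FMR-specific state, kept per MR and reached through the 'u' union in struct rds_ib_mr */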
+struct rds_ib_fmr {
+ struct ib_fmr *fmr;
+ u64 *dma;
+};
+
/* This is stored as mr->r_trans_private. */
struct rds_ib_mr {
struct rds_ib_device *device;
struct rds_ib_mr_pool *pool;
- struct ib_fmr *fmr;
struct llist_node llnode;
struct scatterlist *sg;
unsigned int sg_len;
- u64 *dma;
int sg_dma_len;
+
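+ /* registration-method specific state (currently FMR only) */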
+ union {
+ struct rds_ib_fmr fmr;
+ } u;
};
/* Our own little MR pool */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
int free_all, struct rds_ib_mr **ibmr_ret)
{
struct rds_ib_mr *ibmr, *next;
+ struct rds_ib_fmr *fmr;
struct llist_node *clean_nodes;
struct llist_node *clean_tail;
LIST_HEAD(unmap_list);
goto out;
/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
- list_for_each_entry(ibmr, &unmap_list, unmap_list)
- list_add(&ibmr->fmr->list, &fmr_list);
+ list_for_each_entry(ibmr, &unmap_list, unmap_list) {
+ fmr = &ibmr->u.fmr;
+ list_add(&fmr->fmr->list, &fmr_list);
+ }
ret = ib_unmap_fmr(&fmr_list);
if (ret)
pr_warn("RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);
/* Now we can destroy the DMA mapping and unpin any pages */
list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
unpinned += ibmr->sg_len;
+ fmr = &ibmr->u.fmr;
__rds_ib_teardown_mr(ibmr);
if (nfreed < free_goal ||
ibmr->remap_count >= pool->fmr_attr.max_maps) {
if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
else
rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
list_del(&ibmr->unmap_list);
- ib_dealloc_fmr(ibmr->fmr);
+ ib_dealloc_fmr(fmr->fmr);
kfree(ibmr);
nfreed++;
}
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
struct rds_sock *rs, u32 *key_ret)
{
struct rds_ib_device *rds_ibdev;
struct rds_ib_mr *ibmr = NULL;
+ struct rds_ib_fmr *fmr;
int ret;
rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
if (IS_ERR(ibmr)) {
rds_ib_dev_put(rds_ibdev);
return ibmr;
}
+ fmr = &ibmr->u.fmr;
ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
if (ret == 0)
- *key_ret = ibmr->fmr->rkey;
+ *key_ret = fmr->fmr->rkey;
else
printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);