return -EINVAL;
mhp = to_iwch_mr(ib_mr);
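+ /* Free the page array used by the new REG_MR mapping path */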
+ kfree(mhp->pages);
rhp = mhp->rhp;
mmid = mhp->attr.stag >> 8;
cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
if (!mhp)
goto err;
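+ /* Page array filled by iwch_map_mr_sg(), one u64 per S/G page */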
+ mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
+ if (!mhp->pages) {
+ ret = -ENOMEM;
+ goto pl_err;
+ }
+
mhp->rhp = rhp;
ret = iwch_alloc_pbl(mhp, max_num_sg);
if (ret)
err2:
iwch_free_pbl(mhp);
err1:
+ kfree(mhp->pages);
+pl_err:
kfree(mhp);
err:
return ERR_PTR(ret);
}
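+
+/*
+ * iwch_set_page - per-page callback for ib_sg_to_pages()
+ *
+ * Stores one page address in the MR's page array; fails once the
+ * array is full (bounded by the PBL size chosen at allocation time).
+ */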
+static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct iwch_mr *mhp = to_iwch_mr(ibmr);
+
+ if (unlikely(mhp->npages == mhp->attr.pbl_size))
+ return -ENOMEM;
+
+ mhp->pages[mhp->npages++] = addr;
+
+ return 0;
+}
+
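+/*
+ * iwch_map_mr_sg - map an S/G list into the MR
+ *
+ * Resets the page count, then lets the core walk the S/G list and
+ * call iwch_set_page() once per page-sized chunk.
+ */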
+static int iwch_map_mr_sg(struct ib_mr *ibmr,
+ struct scatterlist *sg,
+ int sg_nents)
+{
+ struct iwch_mr *mhp = to_iwch_mr(ibmr);
+
+ mhp->npages = 0;
+
+ return ib_sg_to_pages(ibmr, sg, sg_nents, iwch_set_page);
+}
+
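+/*
+ * Illustrative ULP usage of the new API (a sketch, not part of this
+ * patch; pd, sgl and sg_nents are assumed to come from the caller):
+ *
+ *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
+ *	n = ib_map_mr_sg(mr, sgl, sg_nents, PAGE_SIZE);
+ *	... then post an IB_WR_REG_MR built from a struct ib_reg_wr ...
+ */
+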
static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
struct ib_device *device,
int page_list_len)
dev->ibdev.bind_mw = iwch_bind_mw;
dev->ibdev.dealloc_mw = iwch_dealloc_mw;
dev->ibdev.alloc_mr = iwch_alloc_mr;
+ dev->ibdev.map_mr_sg = iwch_map_mr_sg;
dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
dev->ibdev.attach_mcast = iwch_multicast_attach;
return 0;
}
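+
+/*
+ * Build a T3 fast-register WR from an IB_WR_REG_MR work request.  The
+ * first WR carries the STag, length, IOVA, permissions and the first
+ * page addresses; if the MR's page list does not fit, the remainder
+ * spills into a second PBL-fragment WR right behind it in the queue.
+ */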
+static int build_memreg(union t3_wr *wqe, struct ib_reg_wr *wr,
+ u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
+{
+ struct iwch_mr *mhp = to_iwch_mr(wr->mr);
+ int i;
+ __be64 *p;
+
+ if (mhp->npages > T3_MAX_FASTREG_DEPTH)
+ return -EINVAL;
+ *wr_cnt = 1;
+ wqe->fastreg.stag = cpu_to_be32(wr->key);
+ wqe->fastreg.len = cpu_to_be32(mhp->ibmr.length);
+ wqe->fastreg.va_base_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
+ wqe->fastreg.va_base_lo_fbo =
+ cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
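+ /* Page size is encoded as log2(bytes) - 12, so 0 means 4KB */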
+ wqe->fastreg.page_type_perms = cpu_to_be32(
+ V_FR_PAGE_COUNT(mhp->npages) |
+ V_FR_PAGE_SIZE(ilog2(wr->mr->page_size) - 12) |
+ V_FR_TYPE(TPT_VATO) |
+ V_FR_PERMS(iwch_ib_to_tpt_access(wr->access)));
+ p = &wqe->fastreg.pbl_addrs[0];
+ for (i = 0; i < mhp->npages; i++, p++) {
+ /* If we need a 2nd WR, then set it up */
+ if (i == T3_MAX_FASTREG_FRAG) {
+ *wr_cnt = 2;
+ wqe = (union t3_wr *)(wq->queue +
+ Q_PTR2IDX((wq->wptr + 1), wq->size_log2));
+ build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
+ Q_GENBIT(wq->wptr + 1, wq->size_log2),
+ 0, 1 + mhp->npages - T3_MAX_FASTREG_FRAG,
+ T3_EOP);
+
+ p = &wqe->pbl_frag.pbl_addrs[0];
+ }
+ *p = cpu_to_be64((u64)mhp->pages[i]);
+ }
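+ /*
+  * 5 header flits plus one flit per page address; a single WR is
+  * capped at 15 flits, so pages beyond T3_MAX_FASTREG_FRAG were
+  * emitted into the second WR set up above.
+  */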
+ *flit_cnt = 5 + mhp->npages;
+ if (*flit_cnt > 15)
+ *flit_cnt = 15;
+ return 0;
+}
+
static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *send_wr,
u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
{
err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
&wr_cnt, &qhp->wq);
break;
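+ /* Fast-register an MR whose pages were collected by iwch_map_mr_sg() */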
+ case IB_WR_REG_MR:
+ t3_wr_opcode = T3_WR_FASTREG;
+ err = build_memreg(wqe, reg_wr(wr), &t3_wr_flit_cnt,
+ &wr_cnt, &qhp->wq);
+ break;
case IB_WR_LOCAL_INV:
if (wr->send_flags & IB_SEND_FENCE)
t3_wr_flags |= T3_LOCAL_FENCE_FLAG;