struct ipath_swqe *swq = NULL;
struct ipath_ibdev *dev;
size_t sz;
+ size_t sg_list_sz;
struct ib_qp *ret;
if (init_attr->create_flags) {
ret = ERR_PTR(-EINVAL);
goto bail;
}
sz = sizeof(*qp);
+ sg_list_sz = 0;
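+ /*
+  * Only max_sge - 1 extra SGEs need separate storage: the first
+  * receive SGE is kept in the SGE state itself (ss->sge in
+  * ipath_init_sge()), and sg_list holds the rest.
+  */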
if (init_attr->srq) {
struct ipath_srq *srq = to_isrq(init_attr->srq);
- sz += sizeof(*qp->r_sg_list) *
- srq->rq.max_sge;
- } else
- sz += sizeof(*qp->r_sg_list) *
- init_attr->cap.max_recv_sge;
- qp = kmalloc(sz, GFP_KERNEL);
+ if (srq->rq.max_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (srq->rq.max_sge - 1);
+ } else if (init_attr->cap.max_recv_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (init_attr->cap.max_recv_sge - 1);
+ qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
if (!qp) {
ret = ERR_PTR(-ENOMEM);
goto bail_swq;
}
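+ /*
+  * UD, SMI and GSI QPs can receive packets looped back by
+  * ipath_ud_loopback(), which can be reached from ipath_post_send()
+  * where sleeping allocations are not allowed, so preallocate the
+  * receive SGE list here with GFP_KERNEL instead of allocating it
+  * with GFP_ATOMIC for every packet.
+  */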
+ if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
+ init_attr->qp_type == IB_QPT_SMI ||
+ init_attr->qp_type == IB_QPT_GSI)) {
+ qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
+ if (!qp->r_ud_sg_list) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp;
+ }
+ } else
+ qp->r_ud_sg_list = NULL;
if (init_attr->srq) {
sz = 0;
qp->r_rq.size = 0;
} else {
qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
qp->r_rq.size * sz);
if (!qp->r_rq.wq) {
ret = ERR_PTR(-ENOMEM);
- goto bail_qp;
+ goto bail_sg_list;
}
}
if (err) {
ret = ERR_PTR(err);
vfree(qp->r_rq.wq);
- goto bail_qp;
+ goto bail_sg_list;
}
qp->ip = NULL;
qp->s_tx = NULL;
vfree(qp->r_rq.wq);
ipath_free_qp(&dev->qp_table, qp);
free_qpn(&dev->qp_table, qp->ibqp.qp_num);
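+ /* Free the preallocated UD SGE list; kfree(NULL) is a no-op for QP
+  * types that never allocated one. */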
+bail_sg_list:
+ kfree(qp->r_ud_sg_list);
bail_qp:
kfree(qp);
bail_swq:
vfree(swq);
if (qp->ip)
kref_put(&qp->ip->ref, ipath_release_mmap_info);
else
vfree(qp->r_rq.wq);
+ kfree(qp->r_ud_sg_list);
vfree(qp->s_wq);
kfree(qp);
return 0;
goto done;
}
- rsge.sg_list = NULL;
-
/*
* Check that the qkey matches (except for QP0, see 9.6.1.4.1).
* Qkeys with the high order bit set mean use the
* qkey from the QP context instead of the work request (see 10.2.5).
*/
rq = &qp->r_rq;
}
- if (rq->max_sge > 1) {
- /*
- * XXX We could use GFP_KERNEL if ipath_do_send()
- * was always called from the tasklet instead of
- * from ipath_post_send().
- */
- rsge.sg_list = kmalloc((rq->max_sge - 1) *
- sizeof(struct ipath_sge),
- GFP_ATOMIC);
- if (!rsge.sg_list) {
- dev->n_pkt_drops++;
- goto drop;
- }
- }
-
/*
* Get the next work request entry to find where to put the data.
* Note that it is safe to drop the lock after changing rq->tail.
*/
goto drop;
}
wqe = get_rwqe_ptr(rq, tail);
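+ /* Use the SGE list preallocated at QP creation time instead of
+  * allocating one per loopback packet. */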
+ rsge.sg_list = qp->r_ud_sg_list;
if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
spin_unlock_irqrestore(&rq->lock, flags);
dev->n_pkt_drops++;
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
swqe->wr.send_flags & IB_SEND_SOLICITED);
drop:
- kfree(rsge.sg_list);
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
done:;
u32 s_lsn; /* limit sequence number (credit) */
struct ipath_swqe *s_wq; /* send work queue */
struct ipath_swqe *s_wqe;
+ struct ipath_sge *r_ud_sg_list; /* SGE list for UD loopback receives */
struct ipath_rq r_rq; /* receive work queue */
struct ipath_sge r_sg_list[0]; /* verified SGEs */
};