Use the s_flags and r_flags definitions that now live in rdmavt.
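For example, a send-flag test that used to check HFI1_S_BUSY now uses the
rdmavt name, with the values coming from include/rdma/rdmavt_qp.h. An
illustrative sketch only (qp_send_allowed() is a hypothetical helper; the
driver's real check is hfi1_send_ok(), updated below):

	#include <rdma/rdmavt_qp.h>

	static inline int qp_send_allowed(struct rvt_qp *qp)
	{
		/* previously HFI1_S_BUSY | HFI1_S_ANY_WAIT_IO */
		return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO));
	}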
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
*/
list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
list_del_init(&qp->rspwait);
- if (qp->r_flags & HFI1_R_RSP_DEFERED_ACK) {
- qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
+ if (qp->r_flags & RVT_R_RSP_NAK) {
+ qp->r_flags &= ~RVT_R_RSP_NAK;
hfi1_send_rc_ack(rcd, qp, 0);
}
- if (qp->r_flags & HFI1_R_RSP_SEND) {
+ if (qp->r_flags & RVT_R_RSP_SEND) {
unsigned long flags;
- qp->r_flags &= ~HFI1_R_RSP_SEND;
+ qp->r_flags &= ~RVT_R_RSP_SEND;
spin_lock_irqsave(&qp->s_lock, flags);
if (ib_hfi1_state_ops[qp->state] &
HFI1_PROCESS_OR_FLUSH_SEND)
write_sequnlock_irqrestore(&dev->iowait_lock, flags);
for (i = 0; i < n; i++)
- hfi1_qp_wakeup(qps[i], HFI1_S_WAIT_PIO);
+ hfi1_qp_wakeup(qps[i], RVT_S_WAIT_PIO);
}
/* translate a send credit update to a bit code of reasons */
hfi1_do_send,
iowait_sleep,
iowait_wakeup);
- qp->s_flags &= HFI1_S_SIGNAL_REQ_WR;
+ qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
qp->s_hdrwords = 0;
qp->s_wqe = NULL;
qp->s_draining = 0;
{
unsigned n;
- if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
hfi1_put_ss(&qp->s_rdma_read_sge);
hfi1_put_ss(&qp->r_sge);
qp->state = IB_QPS_ERR;
- if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
del_timer(&qp->s_timer);
}
- if (qp->s_flags & HFI1_S_ANY_WAIT_SEND)
- qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND;
+ if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
+ qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
write_seqlock(&dev->iowait_lock);
- if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
- qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+ if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
+ qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
list_del_init(&priv->s_iowait.list);
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
write_sequnlock(&dev->iowait_lock);
- if (!(qp->s_flags & HFI1_S_BUSY)) {
+ if (!(qp->s_flags & RVT_S_BUSY)) {
qp->s_hdrwords = 0;
if (qp->s_rdma_mr) {
rvt_put_mr(qp->s_rdma_mr);
wc.qp = &qp->ibqp;
wc.opcode = IB_WC_RECV;
- if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) {
+ if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
wc.wr_id = qp->r_wr_id;
wc.status = err;
hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
flush_iowait(qp);
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
spin_unlock(&qp->s_lock);
spin_unlock_irq(&qp->r_lock);
/* Stop the sending work queue and retry timer */
case IB_QPS_RTR:
/* Allow event to re-trigger if QP set to RTR more than once */
- qp->r_flags &= ~HFI1_R_COMM_EST;
+ qp->r_flags &= ~RVT_R_COMM_EST;
qp->state = new_state;
break;
qp->remote_ah_attr = qp->alt_ah_attr;
qp->port_num = qp->alt_ah_attr.port_num;
qp->s_pkey_index = qp->s_alt_pkey_index;
- qp->s_flags |= HFI1_S_AHG_CLEAR;
+ qp->s_flags |= RVT_S_AHG_CLEAR;
priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
}
init_attr->recv_cq = qp->ibqp.recv_cq;
init_attr->srq = qp->ibqp.srq;
init_attr->cap = attr->cap;
- if (qp->s_flags & HFI1_S_SIGNAL_REQ_WR)
+ if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
else
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
qp->s_size = init_attr->cap.max_send_wr + 1;
qp->s_max_sge = init_attr->cap.max_send_sge;
if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
- qp->s_flags = HFI1_S_SIGNAL_REQ_WR;
+ qp->s_flags = RVT_S_SIGNAL_REQ_WR;
dev = to_idev(ibpd->device);
dd = dd_from_dev(dev);
err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type,
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
flush_iowait(qp);
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
spin_unlock(&qp->s_lock);
spin_unlock_irq(&qp->r_lock);
cancel_work_sync(&priv->s_iowait.iowork);
* honor the credit field.
*/
if (credit == HFI1_AETH_CREDIT_INVAL) {
- if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
- qp->s_flags |= HFI1_S_UNLIMITED_CREDIT;
- if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
+ qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
hfi1_schedule_send(qp);
}
}
- } else if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
+ } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
/* Compute new LSN (i.e., MSN + credit) */
credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
if (cmp_msn(credit, qp->s_lsn) > 0) {
qp->s_lsn = credit;
- if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
hfi1_schedule_send(qp);
}
}
to_iport(qp->ibqp.device, qp->port_num);
ibp->rvp.n_dmawait++;
- qp->s_flags |= HFI1_S_WAIT_DMA_DESC;
+ qp->s_flags |= RVT_S_WAIT_DMA_DESC;
list_add_tail(&priv->s_iowait.list, &sde->dmawait);
- trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC);
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
atomic_inc(&qp->refcount);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~HFI1_S_BUSY;
+ qp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&qp->s_lock, flags);
ret = -EBUSY;
} else {
struct rvt_qp *qp = iowait_to_qp(wait);
WARN_ON(reason != SDMA_AVAIL_REASON);
- hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC);
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}
int hfi1_qp_init(struct hfi1_ibdev *dev)
void qp_comm_est(struct rvt_qp *qp)
{
- qp->r_flags |= HFI1_R_COMM_EST;
+ qp->r_flags |= RVT_R_COMM_EST;
if (qp->ibqp.event_handler) {
struct ib_event ev;
qp->remote_ah_attr = qp->alt_ah_attr;
qp->port_num = qp->alt_ah_attr.port_num;
qp->s_pkey_index = qp->s_alt_pkey_index;
- qp->s_flags |= HFI1_S_AHG_CLEAR;
+ qp->s_flags |= RVT_S_AHG_CLEAR;
priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
struct hfi1_qp_priv *priv = qp->priv;
priv->s_hdr->ahgcount = 0;
- qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR);
+ qp->s_flags &= ~(RVT_S_AHG_VALID | RVT_S_AHG_CLEAR);
if (priv->s_sde && qp->s_ahgidx >= 0)
sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
qp->s_ahgidx = -1;
static void start_timer(struct rvt_qp *qp)
{
- qp->s_flags |= HFI1_S_TIMER;
+ qp->s_flags |= RVT_S_TIMER;
qp->s_timer.function = rc_timeout;
/* 4.096 usec. * (1 << qp->timeout) */
qp->s_timer.expires = jiffies + qp->timeout_jiffies;
case OP(ACKNOWLEDGE):
/* Check for no next entry in the queue. */
if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
- if (qp->s_flags & HFI1_S_ACK_PENDING)
+ if (qp->s_flags & RVT_S_ACK_PENDING)
goto normal;
goto bail;
}
* (see above).
*/
qp->s_ack_state = OP(SEND_ONLY);
- qp->s_flags &= ~HFI1_S_ACK_PENDING;
+ qp->s_flags &= ~RVT_S_ACK_PENDING;
qp->s_cur_sge = NULL;
if (qp->s_nak_state)
ohdr->u.aeth =
qp->s_ack_state = OP(ACKNOWLEDGE);
/*
* Ensure s_rdma_ack_cnt changes are committed prior to resetting
- * HFI1_S_RESP_PENDING
+ * RVT_S_RESP_PENDING
*/
smp_wmb();
- qp->s_flags &= ~(HFI1_S_RESP_PENDING
- | HFI1_S_ACK_PENDING
- | HFI1_S_AHG_VALID);
+ qp->s_flags &= ~(RVT_S_RESP_PENDING
+ | RVT_S_ACK_PENDING
+ | RVT_S_AHG_VALID);
return 0;
}
spin_lock_irqsave(&qp->s_lock, flags);
/* Sending responses has higher priority over sending requests. */
- if ((qp->s_flags & HFI1_S_RESP_PENDING) &&
+ if ((qp->s_flags & RVT_S_RESP_PENDING) &&
make_rc_ack(dev, qp, ohdr, pmtu))
goto done;
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_iowait.sdma_busy)) {
- qp->s_flags |= HFI1_S_WAIT_DMA;
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
clear_ahg(qp);
goto done;
}
- if (qp->s_flags & (HFI1_S_WAIT_RNR | HFI1_S_WAIT_ACK))
+ if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
goto bail;
if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
- qp->s_flags |= HFI1_S_WAIT_PSN;
+ qp->s_flags |= RVT_S_WAIT_PSN;
goto bail;
}
qp->s_sending_psn = qp->s_psn;
*/
if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
qp->s_num_rd_atomic) {
- qp->s_flags |= HFI1_S_WAIT_FENCE;
+ qp->s_flags |= RVT_S_WAIT_FENCE;
goto bail;
}
wqe->psn = qp->s_next_psn;
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
/* If no credit, return. */
- if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) &&
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT;
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
wqe->lpsn = wqe->psn;
break;
case IB_WR_RDMA_WRITE:
- if (newreq && !(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
/* FALLTHROUGH */
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
- if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) &&
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT;
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
ohdr->u.rc.reth.vaddr =
if (newreq) {
if (qp->s_num_rd_atomic >=
qp->s_max_rd_atomic) {
- qp->s_flags |= HFI1_S_WAIT_RDMAR;
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
goto bail;
}
qp->s_num_rd_atomic++;
- if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
/*
* Adjust s_next_psn to count the
if (newreq) {
if (qp->s_num_rd_atomic >=
qp->s_max_rd_atomic) {
- qp->s_flags |= HFI1_S_WAIT_RDMAR;
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
goto bail;
}
qp->s_num_rd_atomic++;
- if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
wqe->lpsn = wqe->psn;
}
delta = delta_psn(bth2, wqe->psn);
if (delta && delta % HFI1_PSN_CREDIT == 0)
bth2 |= IB_BTH_REQ_ACK;
- if (qp->s_flags & HFI1_S_SEND_ONE) {
- qp->s_flags &= ~HFI1_S_SEND_ONE;
- qp->s_flags |= HFI1_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_SEND_ONE) {
+ qp->s_flags &= ~RVT_S_SEND_ONE;
+ qp->s_flags |= RVT_S_WAIT_ACK;
bth2 |= IB_BTH_REQ_ACK;
}
qp->s_len -= len;
goto unlock;
bail:
- qp->s_flags &= ~HFI1_S_BUSY;
+ qp->s_flags &= ~RVT_S_BUSY;
unlock:
spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
unsigned long flags;
/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
- if (qp->s_flags & HFI1_S_RESP_PENDING)
+ if (qp->s_flags & RVT_S_RESP_PENDING)
goto queue_ack;
/* Ensure s_rdma_ack_cnt changes are committed */
queue_ack:
this_cpu_inc(*ibp->rvp.rc_qacks);
spin_lock_irqsave(&qp->s_lock, flags);
- qp->s_flags |= HFI1_S_ACK_PENDING | HFI1_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
qp->s_nak_state = qp->r_nak_state;
qp->s_ack_psn = qp->r_ack_psn;
if (is_fecn)
- qp->s_flags |= HFI1_S_ECN;
+ qp->s_flags |= RVT_S_ECN;
/* Schedule the send tasklet. */
hfi1_schedule_send(qp);
done:
qp->s_psn = psn;
/*
- * Set HFI1_S_WAIT_PSN as rc_complete() may start the timer
+ * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
* asynchronously before the send tasklet can get scheduled.
* Doing it in hfi1_make_rc_req() is too late.
*/
if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
(cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
- qp->s_flags |= HFI1_S_WAIT_PSN;
- qp->s_flags &= ~HFI1_S_AHG_VALID;
+ qp->s_flags |= RVT_S_WAIT_PSN;
+ qp->s_flags &= ~RVT_S_AHG_VALID;
}
/*
else
ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
- qp->s_flags &= ~(HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR |
- HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_PSN |
- HFI1_S_WAIT_ACK);
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
+ RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
+ RVT_S_WAIT_ACK);
if (wait)
- qp->s_flags |= HFI1_S_SEND_ONE;
+ qp->s_flags |= RVT_S_SEND_ONE;
reset_psn(qp, psn);
}
spin_lock_irqsave(&qp->r_lock, flags);
spin_lock(&qp->s_lock);
- if (qp->s_flags & HFI1_S_TIMER) {
+ if (qp->s_flags & RVT_S_TIMER) {
ibp = to_iport(qp->ibqp.device, qp->port_num);
ibp->rvp.n_rc_timeouts++;
- qp->s_flags &= ~HFI1_S_TIMER;
+ qp->s_flags &= ~RVT_S_TIMER;
del_timer(&qp->s_timer);
trace_hfi1_rc_timeout(qp, qp->s_last_psn + 1);
restart_rc(qp, qp->s_last_psn + 1, 1);
unsigned long flags;
spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & HFI1_S_WAIT_RNR) {
- qp->s_flags &= ~HFI1_S_WAIT_RNR;
+ if (qp->s_flags & RVT_S_WAIT_RNR) {
+ qp->s_flags &= ~RVT_S_WAIT_RNR;
del_timer(&qp->s_timer);
hfi1_schedule_send(qp);
}
*/
if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
!(qp->s_flags &
- (HFI1_S_TIMER | HFI1_S_WAIT_RNR | HFI1_S_WAIT_PSN)) &&
+ (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK))
start_timer(qp);
rvt_put_mr(sge->mr);
}
/* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
* and they are now complete, restart sending.
*/
trace_hfi1_rc_sendcomplete(qp, psn);
- if (qp->s_flags & HFI1_S_WAIT_PSN &&
+ if (qp->s_flags & RVT_S_WAIT_PSN &&
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
- qp->s_flags &= ~HFI1_S_WAIT_PSN;
+ qp->s_flags &= ~RVT_S_WAIT_PSN;
qp->s_sending_psn = qp->s_psn;
qp->s_sending_hpsn = qp->s_psn - 1;
hfi1_schedule_send(qp);
rvt_put_mr(sge->mr);
}
/* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
int diff;
/* Remove QP from retry timer */
- if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
del_timer(&qp->s_timer);
}
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
(opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
/* Retry this request. */
- if (!(qp->r_flags & HFI1_R_RDMAR_SEQ)) {
- qp->r_flags |= HFI1_R_RDMAR_SEQ;
+ if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= HFI1_R_RSP_SEND;
+ qp->r_flags |= RVT_R_RSP_SEND;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait,
&rcd->qp_wait_list);
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
qp->s_num_rd_atomic--;
/* Restart sending task if fence is complete */
- if ((qp->s_flags & HFI1_S_WAIT_FENCE) &&
+ if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
!qp->s_num_rd_atomic) {
- qp->s_flags &= ~(HFI1_S_WAIT_FENCE |
- HFI1_S_WAIT_ACK);
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE |
+ RVT_S_WAIT_ACK);
hfi1_schedule_send(qp);
- } else if (qp->s_flags & HFI1_S_WAIT_RDMAR) {
- qp->s_flags &= ~(HFI1_S_WAIT_RDMAR |
- HFI1_S_WAIT_ACK);
+ } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
+ qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
+ RVT_S_WAIT_ACK);
hfi1_schedule_send(qp);
}
}
qp->s_state = OP(SEND_LAST);
qp->s_psn = psn + 1;
}
- if (qp->s_flags & HFI1_S_WAIT_ACK) {
- qp->s_flags &= ~HFI1_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_WAIT_ACK) {
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
hfi1_schedule_send(qp);
}
hfi1_get_credit(qp, aeth);
ibp->rvp.n_rnr_naks++;
if (qp->s_acked == qp->s_tail)
goto bail;
- if (qp->s_flags & HFI1_S_WAIT_RNR)
+ if (qp->s_flags & RVT_S_WAIT_RNR)
goto bail;
if (qp->s_rnr_retry == 0) {
status = IB_WC_RNR_RETRY_EXC_ERR;
reset_psn(qp, psn);
- qp->s_flags &= ~(HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_ACK);
- qp->s_flags |= HFI1_S_WAIT_RNR;
+ qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
+ qp->s_flags |= RVT_S_WAIT_RNR;
qp->s_timer.function = hfi1_rc_rnr_retry;
qp->s_timer.expires = jiffies + usecs_to_jiffies(
ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
struct rvt_swqe *wqe;
/* Remove QP from retry timer */
- if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
del_timer(&qp->s_timer);
}
}
ibp->rvp.n_rdma_seq++;
- qp->r_flags |= HFI1_R_RDMAR_SEQ;
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= HFI1_R_RSP_SEND;
+ qp->r_flags |= RVT_R_RSP_SEND;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
* Skip everything other than the PSN we expect, if we are waiting
* for a reply to a restarted RDMA read or atomic op.
*/
- if (qp->r_flags & HFI1_R_RDMAR_SEQ) {
+ if (qp->r_flags & RVT_R_RDMAR_SEQ) {
if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
goto ack_done;
- qp->r_flags &= ~HFI1_R_RDMAR_SEQ;
+ qp->r_flags &= ~RVT_R_RDMAR_SEQ;
}
if (unlikely(qp->s_acked == qp->s_tail))
* We got a response so update the timeout.
* 4.096 usec. * (1 << qp->timeout)
*/
- qp->s_flags |= HFI1_S_TIMER;
+ qp->s_flags |= RVT_S_TIMER;
mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
- if (qp->s_flags & HFI1_S_WAIT_ACK) {
- qp->s_flags &= ~HFI1_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_WAIT_ACK) {
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
hfi1_schedule_send(qp);
}
struct rvt_qp *qp)
{
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= HFI1_R_RSP_DEFERED_ACK;
+ qp->r_flags |= RVT_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
if (list_empty(&qp->rspwait))
return;
list_del_init(&qp->rspwait);
- qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
+ qp->r_flags &= ~RVT_R_RSP_NAK;
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
break;
}
qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags |= HFI1_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
qp->r_nak_state = 0;
hfi1_schedule_send(qp);
break;
}
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST))
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
qp_comm_est(qp);
/* OK, process the packet. */
hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
hfi1_put_ss(&qp->r_sge);
qp->r_msn++;
- if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
break;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
qp->r_head_ack_queue = next;
/* Schedule the send tasklet. */
- qp->s_flags |= HFI1_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
qp->r_head_ack_queue = next;
/* Schedule the send tasklet. */
- qp->s_flags |= HFI1_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
qp->r_wr_id = wqe->wr_id;
ret = 1;
- set_bit(HFI1_R_WRID_VALID, &qp->r_aflags);
+ set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
if (handler) {
u32 n;
spin_lock_irqsave(&sqp->s_lock, flags);
/* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT)) ||
+ if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_OR_FLUSH_SEND))
goto unlock;
- sqp->s_flags |= HFI1_S_BUSY;
+ sqp->s_flags |= RVT_S_BUSY;
again:
if (sqp->s_last == sqp->s_head)
if (release)
hfi1_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
goto send_comp;
if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
spin_lock_irqsave(&sqp->s_lock, flags);
if (!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_RECV_OK))
goto clr_busy;
- sqp->s_flags |= HFI1_S_WAIT_RNR;
+ sqp->s_flags |= RVT_S_WAIT_RNR;
sqp->s_timer.function = hfi1_rc_rnr_retry;
sqp->s_timer.expires = jiffies +
usecs_to_jiffies(ib_hfi1_rnr_table[qp->r_min_rnr_timer]);
if (sqp->ibqp.qp_type == IB_QPT_RC) {
int lastwqe = hfi1_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
- sqp->s_flags &= ~HFI1_S_BUSY;
+ sqp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&sqp->s_lock, flags);
if (lastwqe) {
struct ib_event ev;
goto done;
}
clr_busy:
- sqp->s_flags &= ~HFI1_S_BUSY;
+ sqp->s_flags &= ~RVT_S_BUSY;
unlock:
spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
struct hfi1_qp_priv *priv = qp->priv;
- if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
+ if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
clear_ahg(qp);
- if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
+ if (!(qp->s_flags & RVT_S_AHG_VALID)) {
/* first middle that needs copy */
if (qp->s_ahgidx < 0)
qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
/* save to protect a change in another thread */
priv->s_hdr->sde = priv->s_sde;
priv->s_hdr->ahgidx = qp->s_ahgidx;
- qp->s_flags |= HFI1_S_AHG_VALID;
+ qp->s_flags |= RVT_S_AHG_VALID;
}
} else {
/* subsequent middle after valid */
if (middle)
build_ahg(qp, bth2);
else
- qp->s_flags &= ~HFI1_S_AHG_VALID;
+ qp->s_flags &= ~RVT_S_AHG_VALID;
priv->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
priv->s_hdr->ibh.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
priv->s_hdr->ibh.lrh[2] =
bth0 |= extra_bytes << 20;
ohdr->bth[0] = cpu_to_be32(bth0);
bth1 = qp->remote_qpn;
- if (qp->s_flags & HFI1_S_ECN) {
- qp->s_flags &= ~HFI1_S_ECN;
+ if (qp->s_flags & RVT_S_ECN) {
+ qp->s_flags &= ~RVT_S_ECN;
/* we recently received a FECN, so return a BECN */
bth1 |= (HFI1_BECN_MASK << HFI1_BECN_SHIFT);
}
return;
}
- qp->s_flags |= HFI1_S_BUSY;
+ qp->s_flags |= RVT_S_BUSY;
spin_unlock_irqrestore(&qp->s_lock, flags);
atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
/* See ch. 11.2.4.1 and 10.7.3.1 */
- if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
status != IB_WC_SUCCESS) {
struct ib_wc wc;
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_iowait.sdma_busy)) {
- qp->s_flags |= HFI1_S_WAIT_DMA;
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
clear_ahg(qp);
goto unlock;
bail:
- qp->s_flags &= ~HFI1_S_BUSY;
+ qp->s_flags &= ~RVT_S_BUSY;
unlock:
spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
inv:
if (qp->r_state == OP(SEND_FIRST) ||
qp->r_state == OP(SEND_MIDDLE)) {
- set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags);
+ set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
} else
hfi1_put_ss(&qp->r_sge);
goto inv;
}
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST))
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
qp_comm_est(qp);
/* OK, process the packet. */
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
- if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
qp->r_sge = qp->s_rdma_read_sge;
else {
ret = hfi1_get_rwqe(qp, 0);
tlen -= (hdrsize + pad + 4);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
hfi1_put_ss(&qp->s_rdma_read_sge);
else {
ret = hfi1_get_rwqe(qp, 1);
return;
rewind:
- set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags);
+ set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
drop:
ibp->rvp.n_pkt_drops++;
/*
* Get the next work request entry to find where to put the data.
*/
- if (qp->r_flags & HFI1_R_REUSE_SGE)
- qp->r_flags &= ~HFI1_R_REUSE_SGE;
+ if (qp->r_flags & RVT_R_REUSE_SGE)
+ qp->r_flags &= ~RVT_R_REUSE_SGE;
else {
int ret;
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= HFI1_R_REUSE_SGE;
+ qp->r_flags |= RVT_R_REUSE_SGE;
ibp->rvp.n_pkt_drops++;
goto bail_unlock;
}
length -= len;
}
hfi1_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
goto bail_unlock;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_iowait.sdma_busy)) {
- qp->s_flags |= HFI1_S_WAIT_DMA;
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
wqe = get_swqe_ptr(qp, qp->s_last);
* zero length descriptor so we get a callback.
*/
if (atomic_read(&priv->s_iowait.sdma_busy)) {
- qp->s_flags |= HFI1_S_WAIT_DMA;
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
qp->s_cur = next_cur;
goto unlock;
bail:
- qp->s_flags &= ~HFI1_S_BUSY;
+ qp->s_flags &= ~RVT_S_BUSY;
unlock:
spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
/*
* Get the next work request entry to find where to put the data.
*/
- if (qp->r_flags & HFI1_R_REUSE_SGE)
- qp->r_flags &= ~HFI1_R_REUSE_SGE;
+ if (qp->r_flags & RVT_R_REUSE_SGE)
+ qp->r_flags &= ~RVT_R_REUSE_SGE;
else {
int ret;
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= HFI1_R_REUSE_SGE;
+ qp->r_flags |= RVT_R_REUSE_SGE;
goto drop;
}
if (has_grh) {
hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
hfi1_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
return;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
write_sequnlock_irqrestore(&dev->iowait_lock, flags);
if (qp)
- hfi1_qp_wakeup(qp, HFI1_S_WAIT_KMEM);
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
}
void update_sge(struct rvt_sge_state *ss, u32 length)
if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK &&
list_empty(&priv->s_iowait.list)) {
dev->n_txwait++;
- qp->s_flags |= HFI1_S_WAIT_TX;
+ qp->s_flags |= RVT_S_WAIT_TX;
list_add_tail(&priv->s_iowait.list, &dev->txwait);
- trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TX);
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
atomic_inc(&qp->refcount);
}
- qp->s_flags &= ~HFI1_S_BUSY;
+ qp->s_flags &= ~RVT_S_BUSY;
write_sequnlock(&dev->iowait_lock);
spin_unlock_irqrestore(&qp->s_lock, flags);
tx = ERR_PTR(-EBUSY);
list_del_init(&priv->s_iowait.list);
/* refcount held until actual wake up */
write_sequnlock_irqrestore(&dev->iowait_lock, flags);
- hfi1_qp_wakeup(qp, HFI1_S_WAIT_TX);
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
break;
}
} while (read_seqretry(&dev->iowait_lock, seq));
* do the flush work until that QP's
* sdma work has finished.
*/
- if (qp->s_flags & HFI1_S_WAIT_DMA) {
- qp->s_flags &= ~HFI1_S_WAIT_DMA;
+ if (qp->s_flags & RVT_S_WAIT_DMA) {
+ qp->s_flags &= ~RVT_S_WAIT_DMA;
hfi1_schedule_send(qp);
}
}
if (list_empty(&priv->s_iowait.list)) {
if (list_empty(&dev->memwait))
mod_timer(&dev->mem_timer, jiffies + 1);
- qp->s_flags |= HFI1_S_WAIT_KMEM;
+ qp->s_flags |= RVT_S_WAIT_KMEM;
list_add_tail(&priv->s_iowait.list, &dev->memwait);
- trace_hfi1_qpsleep(qp, HFI1_S_WAIT_KMEM);
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
atomic_inc(&qp->refcount);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~HFI1_S_BUSY;
+ qp->s_flags &= ~RVT_S_BUSY;
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
int was_empty;
dev->n_piowait++;
- qp->s_flags |= HFI1_S_WAIT_PIO;
+ qp->s_flags |= RVT_S_WAIT_PIO;
was_empty = list_empty(&sc->piowait);
list_add_tail(&priv->s_iowait.list, &sc->piowait);
- trace_hfi1_qpsleep(qp, HFI1_S_WAIT_PIO);
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
atomic_inc(&qp->refcount);
/* counting: only call wantpiobuf_intr if first user */
if (was_empty)
hfi1_sc_wantpiobuf_intr(sc, 1);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~HFI1_S_BUSY;
+ qp->s_flags &= ~RVT_S_BUSY;
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
* @ps: the state of the packet to send
*
* Return zero if packet is sent or queued OK.
- * Return non-zero and clear qp->s_flags HFI1_S_BUSY otherwise.
+ * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
*/
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
struct hfi1_ctxtdata;
struct hfi1_pportdata;
struct hfi1_pportdata *ppd;
};
-/*
- * Atomic bit definitions for r_aflags.
- */
-#define HFI1_R_WRID_VALID 0
-#define HFI1_R_REWIND_SGE 1
-
-/*
- * Bit definitions for r_flags.
- */
-#define HFI1_R_REUSE_SGE 0x01
-#define HFI1_R_RDMAR_SEQ 0x02
-/* defer ack until end of interrupt session */
-#define HFI1_R_RSP_DEFERED_ACK 0x04
-/* relay ack to send engine */
-#define HFI1_R_RSP_SEND 0x08
-#define HFI1_R_COMM_EST 0x10
-
-/*
- * Bit definitions for s_flags.
- *
- * HFI1_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
- * HFI1_S_BUSY - send tasklet is processing the QP
- * HFI1_S_TIMER - the RC retry timer is active
- * HFI1_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
- * HFI1_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
- * before processing the next SWQE
- * HFI1_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
- * before processing the next SWQE
- * HFI1_S_WAIT_RNR - waiting for RNR timeout
- * HFI1_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
- * HFI1_S_WAIT_DMA - waiting for send DMA queue to drain before generating
- * next send completion entry not via send DMA
- * HFI1_S_WAIT_PIO - waiting for a send buffer to be available
- * HFI1_S_WAIT_TX - waiting for a struct verbs_txreq to be available
- * HFI1_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
- * HFI1_S_WAIT_KMEM - waiting for kernel memory to be available
- * HFI1_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
- * HFI1_S_WAIT_ACK - waiting for an ACK packet before sending more requests
- * HFI1_S_SEND_ONE - send one packet, request ACK, then wait for ACK
- * HFI1_S_ECN - a BECN was queued to the send engine
- */
-#define HFI1_S_SIGNAL_REQ_WR 0x0001
-#define HFI1_S_BUSY 0x0002
-#define HFI1_S_TIMER 0x0004
-#define HFI1_S_RESP_PENDING 0x0008
-#define HFI1_S_ACK_PENDING 0x0010
-#define HFI1_S_WAIT_FENCE 0x0020
-#define HFI1_S_WAIT_RDMAR 0x0040
-#define HFI1_S_WAIT_RNR 0x0080
-#define HFI1_S_WAIT_SSN_CREDIT 0x0100
-#define HFI1_S_WAIT_DMA 0x0200
-#define HFI1_S_WAIT_PIO 0x0400
-#define HFI1_S_WAIT_TX 0x0800
-#define HFI1_S_WAIT_DMA_DESC 0x1000
-#define HFI1_S_WAIT_KMEM 0x2000
-#define HFI1_S_WAIT_PSN 0x4000
-#define HFI1_S_WAIT_ACK 0x8000
-#define HFI1_S_SEND_ONE 0x10000
-#define HFI1_S_UNLIMITED_CREDIT 0x20000
-#define HFI1_S_AHG_VALID 0x40000
-#define HFI1_S_AHG_CLEAR 0x80000
-#define HFI1_S_ECN 0x100000
-
-/*
- * Wait flags that would prevent any packet type from being sent.
- */
-#define HFI1_S_ANY_WAIT_IO (HFI1_S_WAIT_PIO | HFI1_S_WAIT_TX | \
- HFI1_S_WAIT_DMA_DESC | HFI1_S_WAIT_KMEM)
-
-/*
- * Wait flags that would prevent send work requests from making progress.
- */
-#define HFI1_S_ANY_WAIT_SEND (HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR | \
- HFI1_S_WAIT_RNR | HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_DMA | \
- HFI1_S_WAIT_PSN | HFI1_S_WAIT_ACK)
-
-#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | HFI1_S_ANY_WAIT_SEND)
-
#define HFI1_PSN_CREDIT 16
/*
*/
static inline int hfi1_send_ok(struct rvt_qp *qp)
{
- return !(qp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT_IO)) &&
- (qp->s_hdrwords || (qp->s_flags & HFI1_S_RESP_PENDING) ||
- !(qp->s_flags & HFI1_S_ANY_WAIT_SEND));
+ return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
+ (qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) ||
+ !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
}
/*