IB/hfi1, IB/rdmavt: Move r_adefered to r_lock cache line
author     Mike Marciniszyn <mike.marciniszyn@intel.com>
           Thu, 4 May 2017 12:14:04 +0000 (05:14 -0700)
committer  Doug Ledford <dledford@redhat.com>
           Thu, 4 May 2017 23:31:46 +0000 (19:31 -0400)
The r_adefered field is causing excessive cache line bouncing.

There are spare bytes in the r_lock cache line, so the best approach is to
make r_adefered an rvt QP field and remove it from the hfi1 priv structure.
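
As a rough sketch of the layout idea (illustrative only; the struct and
field names below are hypothetical, not the actual rvt_qp definition),
keeping a hot, lock-protected counter in the same cache line as its lock
means a CPU that already owns the lock's cache line does not have to pull
in and dirty a second line:

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_qp {
	/* receive-side state; r_lock starts a fresh cache line */
	spinlock_t r_lock ____cacheline_aligned_in_smp;
	u32 r_psn;		/* expected receive PSN */
	u8  r_state;		/* opcode of last packet received */
	u8  r_adefered;		/* deferred ACK count, updated under r_lock */
};

Since r_adefered is only updated while r_lock is held on the receive path,
co-locating it with r_lock avoids bouncing the hfi1_qp_priv line as well.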

Signed-off-by: Sebastian Sanchez <sebastian.sanchez@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/verbs.h
include/rdma/rdmavt_qp.h

diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 4573e4c9f35cc7ed9577cc970d228334c29134b1..650305cc037306923e5e742c02f7977e672a70f9 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -731,9 +731,7 @@ void quiesce_qp(struct rvt_qp *qp)
 
 void notify_qp_reset(struct rvt_qp *qp)
 {
-       struct hfi1_qp_priv *priv = qp->priv;
-
-       priv->r_adefered = 0;
+       qp->r_adefered = 0;
        clear_ahg(qp);
 }
 
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 75a729cd0c3dfbd4639561e4a3b0161ff75b2573..069bdaf061ab923cbc8b123ab182806fdb3c4dac 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -727,10 +727,9 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
        struct ib_header hdr;
        struct ib_other_headers *ohdr;
        unsigned long flags;
-       struct hfi1_qp_priv *priv = qp->priv;
 
        /* clear the defer count */
-       priv->r_adefered = 0;
+       qp->r_adefered = 0;
 
        /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
        if (qp->s_flags & RVT_S_RESP_PENDING)
@@ -1604,9 +1603,7 @@ static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
 
 static inline void rc_cancel_ack(struct rvt_qp *qp)
 {
-       struct hfi1_qp_priv *priv = qp->priv;
-
-       priv->r_adefered = 0;
+       qp->r_adefered = 0;
        if (list_empty(&qp->rspwait))
                return;
        list_del_init(&qp->rspwait);
@@ -2314,13 +2311,11 @@ send_last:
        qp->r_nak_state = 0;
        /* Send an ACK if requested or required. */
        if (psn & IB_BTH_REQ_ACK) {
-               struct hfi1_qp_priv *priv = qp->priv;
-
                if (packet->numpkt == 0) {
                        rc_cancel_ack(qp);
                        goto send_ack;
                }
-               if (priv->r_adefered >= HFI1_PSN_CREDIT) {
+               if (qp->r_adefered >= HFI1_PSN_CREDIT) {
                        rc_cancel_ack(qp);
                        goto send_ack;
                }
@@ -2328,7 +2323,7 @@ send_last:
                        rc_cancel_ack(qp);
                        goto send_ack;
                }
-               priv->r_adefered++;
+               qp->r_adefered++;
                rc_defered_ack(rcd, qp);
        }
        return;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 52ff275caf540f3b7928e6e85d995784db59aef8..c0913c6c8002d6e716c42a938a65760373d61b67 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -125,7 +125,6 @@ struct hfi1_qp_priv {
        struct sdma_engine *s_sde;                /* current sde */
        struct send_context *s_sendcontext;       /* current sendcontext */
        u8 s_sc;                                  /* SC[0..4] for next packet */
-       u8 r_adefered;                            /* number of acks defered */
        struct iowait s_iowait;
        struct rvt_qp *owner;
 };
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 1d8141a88d3cbce74491f4b81ca823137312b59d..be6472e5b06bd1ed1117e928e9b3d44748dfbfeb 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -324,6 +324,7 @@ struct rvt_qp {
        u8 r_state;             /* opcode of last packet received */
        u8 r_flags;
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */
+       u8 r_adefered;          /* defered ack count */
 
        struct list_head rspwait;       /* link for waiting to respond */