IB/qib, staging/rdma/hfi1, IB/rdmavt: progress selection changes
author Mike Marciniszyn <mike.marciniszyn@intel.com>
Sun, 14 Feb 2016 20:45:44 +0000 (12:45 -0800)
committer Doug Ledford <dledford@redhat.com>
Fri, 11 Mar 2016 01:38:14 +0000 (20:38 -0500)
The non-rdmavt versions of qib and hfi1 allow for a differing
heuristic to override scheduled progress in favor of a direct
call to the progress routine.

This patch adds that capability to rdmavt and wires it up for both drivers.
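
For reference, the overall flow after this change, sketched from the
hunks below (not a verbatim copy of either driver):

	/*
	 * Progress selection (sketch):
	 *
	 * rvt_post_send()
	 *   call_send = send queue was empty && only one wr posted
	 *   for each wr:
	 *     rvt_post_one_wr(qp, wr, &call_send)
	 *       ret = rdi->driver_f.check_send_wqe(qp, wqe)
	 *         ret < 0  : reject the wqe (post fails)
	 *         ret == 0 : no opinion, keep scheduled progress
	 *         ret > 0  : driver hint, set *call_send
	 *   if call_send : call the progress routine directly
	 *   else         : schedule progress as before
	 */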

Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/staging/rdma/hfi1/qp.c

index 6ffa0221da9f5dd523002d605d99f1ad34c7be60..575b737d9ef3d59441c35545f22ebdf53731b835 100644 (file)
@@ -484,12 +484,13 @@ void qib_get_credit(struct rvt_qp *qp, u32 aeth)
  * the ring but after the wqe has been
  * setup.
  *
- * Returns 0 on success, -EINVAL on failure
+ * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure
  */
 int qib_check_send_wqe(struct rvt_qp *qp,
                       struct rvt_swqe *wqe)
 {
        struct rvt_ah *ah;
+       int ret = 0;
 
        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
@@ -503,11 +504,13 @@ int qib_check_send_wqe(struct rvt_qp *qp,
                ah = ibah_to_rvtah(wqe->ud_wr.ah);
                if (wqe->length > (1 << ah->log_pmtu))
                        return -EINVAL;
+               /* progress hint */
+               ret = 1;
                break;
        default:
                break;
        }
-       return 0;
+       return ret;
 }
 
 #ifdef CONFIG_DEBUG_FS
index dbf124db1fd110a6af6ea9b91541a2f4881ba4ce..ef82abf2d89ec9d2b7ace9bf4706b4de61b52f36 100644 (file)
@@ -1430,7 +1430,9 @@ static inline u32 qp_get_savail(struct rvt_qp *qp)
  * @qp: the QP to post on
  * @wr: the work request to send
  */
-static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
+static int rvt_post_one_wr(struct rvt_qp *qp,
+                          struct ib_send_wr *wr,
+                          int *call_send)
 {
        struct rvt_swqe *wqe;
        u32 next;
@@ -1532,8 +1534,10 @@ static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
        /* general part of wqe valid - allow for driver checks */
        if (rdi->driver_f.check_send_wqe) {
                ret = rdi->driver_f.check_send_wqe(qp, wqe);
-               if (ret)
+               if (ret < 0)
                        goto bail_inval_free;
+               if (ret)
+                       *call_send = ret;
        }
 
        log_pmtu = qp->log_pmtu;
@@ -1606,7 +1610,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
 
        for (; wr; wr = wr->next) {
-               err = rvt_post_one_wr(qp, wr);
+               err = rvt_post_one_wr(qp, wr, &call_send);
                if (unlikely(err)) {
                        *bad_wr = wr;
                        goto bail;
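
The hunk above threads call_send into rvt_post_one_wr() but does not
show where it is finally consumed. A plausible tail for rvt_post_send(),
assuming the rdmavt do_send/schedule_send driver callbacks (names not
confirmed by this diff), would be:

bail:
	/* posting lock dropped here (elided); then kick or schedule progress */
	if (nreq) {
		if (call_send)
			rdi->driver_f.do_send(qp);       /* direct call */
		else
			rdi->driver_f.schedule_send(qp); /* deferred */
	}
	return err;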
index c7b83d66b59bd5add3184c59ff27eac43d53c83f..2d157054576a327c7caa366d4f4209d2bc61d409 100644 (file)
@@ -73,6 +73,7 @@ static int iowait_sleep(
        struct sdma_txreq *stx,
        unsigned seq);
 static void iowait_wakeup(struct iowait *wait, int reason);
+static void qp_pio_drain(struct rvt_qp *qp);
 
 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
@@ -272,7 +273,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp,
        default:
                break;
        }
-       return 0;
+       return wqe->length <= piothreshold;
 }
 
 /**
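
In the hfi1 hunk above, the hint depends on piothreshold, presumably the
driver's cutoff below which a send goes out by PIO rather than SDMA (its
definition is not part of this diff), so only small requests force the
direct call. A minimal sketch of the resulting callback shape, with the
per-QP-type validation elided:

int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	/* ... per-QP-type length/AH checks returning -EINVAL (elided) ... */

	/* small, PIO-sized requests are worth an immediate progress call */
	return wqe->length <= piothreshold;
}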