9P/RDMA: Use a semaphore to protect the RQ
author: Simon Derr <simon.derr@bull.net>
Fri, 21 Jun 2013 13:32:39 +0000 (15:32 +0200)
committer: Eric Van Hensbergen <ericvh@gmail.com>
Mon, 8 Jul 2013 03:02:29 +0000 (22:02 -0500)
The current code keeps track of the number of buffers posted in the RQ,
and will prevent it from overflowing. But it does so by simply dropping
post requests (and leaking memory in the process).
When this happens there will actually be too few buffers posted, and
soon the 9P server will complain about 'RNR retry counter exceeded'
errors.

Instead, use a semaphore, and block until the RQ is ready for another
buffer to be posted.

Signed-off-by: Simon Derr <simon.derr@bull.net>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
net/9p/trans_rdma.c

index 274a9c1d3c3deeb00b0ddd96a0eff91dac5b0b2c..ad8dc331574b81441b179b9691811fff6531a0fa 100644 (file)
@@ -73,7 +73,7 @@
  * @sq_depth: The depth of the Send Queue
  * @sq_sem: Semaphore for the SQ
  * @rq_depth: The depth of the Receive Queue.
- * @rq_count: Count of requests in the Receive Queue.
+ * @rq_sem: Semaphore for the RQ
  * @addr: The remote peer's address
  * @req_lock: Protects the active request list
  * @cm_done: Completion event for connection management tracking
@@ -98,7 +98,7 @@ struct p9_trans_rdma {
        int sq_depth;
        struct semaphore sq_sem;
        int rq_depth;
-       atomic_t rq_count;
+       struct semaphore rq_sem;
        struct sockaddr_in addr;
        spinlock_t req_lock;
 
@@ -341,8 +341,8 @@ static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
 
                switch (c->wc_op) {
                case IB_WC_RECV:
-                       atomic_dec(&rdma->rq_count);
                        handle_recv(client, rdma, c, wc.status, wc.byte_len);
+                       up(&rdma->rq_sem);
                        break;
 
                case IB_WC_SEND:
@@ -441,12 +441,14 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
         * outstanding request, so we must keep a count to avoid
         * overflowing the RQ.
         */
-       if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
-               err = post_recv(client, rpl_context);
-               if (err)
-                       goto err_free1;
-       } else
-               atomic_dec(&rdma->rq_count);
+       if (down_interruptible(&rdma->rq_sem))
+               goto error; /* FIXME : -EINTR instead */
+
+       err = post_recv(client, rpl_context);
+       if (err) {
+               p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
+               goto err_free1;
+       }
 
        /* remove posted receive buffer from request structure */
        req->rc = NULL;
@@ -537,7 +539,7 @@ static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
        spin_lock_init(&rdma->req_lock);
        init_completion(&rdma->cm_done);
        sema_init(&rdma->sq_sem, rdma->sq_depth);
-       atomic_set(&rdma->rq_count, 0);
+       sema_init(&rdma->rq_sem, rdma->rq_depth);
 
        return rdma;
 }