nfsd41: sunrpc: Added rpc server-side backchannel handling
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] / net / sunrpc / xprtsock.c
index 62438f3a914d6cb9c6f8c47b89a6cd5e026dd933..d9a2b815714e7fa36e6a967f4a1668f8788bd61b 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/tcp.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/xprtsock.h>
 #include <linux/file.h>
 #ifdef CONFIG_NFS_V4_1
@@ -43,6 +44,7 @@
 #include <net/udp.h>
 #include <net/tcp.h>
 
+#include "sunrpc.h"
 /*
  * xprtsock tunables
  */
@@ -2098,6 +2100,134 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
                        xprt->stat.bklog_u);
 }
 
+/*
+ * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
+ * we allocate pages instead of doing a kmalloc like rpc_malloc is because we
+ * want to use the server side send routines.
+ */
+void *bc_malloc(struct rpc_task *task, size_t size)
+{
+       struct page *page;
+       struct rpc_buffer *buf;
+
+       /*
+        * A request must fit in a single page together with the rpc_buffer
+        * header. An oversized request is a caller bug, but it is not worth
+        * crashing the machine over (the old code used BUG_ON here): warn
+        * once and fail the allocation so the RPC layer can back off.
+        */
+       if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
+               WARN_ONCE(1, "RPC: oversized backchannel buffer (%zu bytes)\n",
+                         size);
+               return NULL;
+       }
+
+       page = alloc_page(GFP_KERNEL);
+       if (!page)
+               return NULL;
+
+       /* The rpc_buffer header lives at the start of the page ... */
+       buf = page_address(page);
+       buf->len = PAGE_SIZE;
+
+       /* ... and the caller gets the payload area that follows it. */
+       return buf->data;
+}
+
+/*
+ * Free the space allocated by the bc_malloc routine
+ */
+void bc_free(void *buffer)
+{
+       /*
+        * NULL is silently ignored; otherwise step back from the payload
+        * pointer to the enclosing rpc_buffer, which sits at the start of
+        * the page obtained in bc_malloc(), and give the page back.
+        */
+       if (buffer)
+               free_page((unsigned long)container_of(buffer,
+                                                     struct rpc_buffer, data));
+}
+
+/*
+ * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
+ * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
+ */
+static int bc_sendto(struct rpc_rqst *req)
+{
+       int len;
+       struct xdr_buf *xbufp = &req->rq_snd_buf;
+       struct rpc_xprt *xprt = req->rq_xprt;
+       struct sock_xprt *transport =
+                               container_of(xprt, struct sock_xprt, xprt);
+       struct socket *sock = transport->sock;
+       unsigned long headoff;
+       unsigned long tailoff;
+
+       /*
+        * Set up the rpc header and record marker stuff
+        */
+       xs_encode_tcp_record_marker(xbufp);
+
+       /*
+        * svc_send_common() wants page/offset pairs; compute the in-page
+        * offsets of the head and tail iovecs.
+        * NOTE(review): this assumes head and tail each fit within a single
+        * page (consistent with bc_malloc's one-page buffers) — confirm.
+        */
+       tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
+       headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
+       len = svc_send_common(sock, xbufp,
+                             virt_to_page(xbufp->head[0].iov_base), headoff,
+                             xbufp->tail[0].iov_base, tailoff);
+
+       /* A short send is treated as a failure; the caller may retry. */
+       if (len != xbufp->len) {
+               printk(KERN_NOTICE "Error sending entire callback!\n");
+               len = -EAGAIN;
+       }
+
+       return len;
+}
+
+/*
+ * The send routine. Borrows from svc_send
+ */
+static int bc_send_request(struct rpc_task *task)
+{
+       struct rpc_rqst *req = task->tk_rqstp;
+       struct svc_xprt *xprt;
+       /*
+        * len must be a signed int: it carries negative errnos (-EAGAIN,
+        * -ENOTCONN).  With the previous u32 type, -ENOTCONN became a large
+        * positive value, the "if (len > 0)" below zeroed it, and errors
+        * were silently reported as success.
+        */
+       int len;
+
+       dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
+       /*
+        * Get the server socket associated with this callback xprt
+        */
+       xprt = req->rq_xprt->bc_xprt;
+
+       /*
+        * Grab the mutex to serialize data as the connection is shared
+        * with the fore channel
+        */
+       if (!mutex_trylock(&xprt->xpt_mutex)) {
+               rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
+               if (!mutex_trylock(&xprt->xpt_mutex))
+                       return -EAGAIN;
+               rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
+       }
+       if (test_bit(XPT_DEAD, &xprt->xpt_flags))
+               len = -ENOTCONN;
+       else
+               len = bc_sendto(req);
+       mutex_unlock(&xprt->xpt_mutex);
+
+       /* A positive byte count means the whole reply went out: success. */
+       if (len > 0)
+               len = 0;
+
+       return len;
+}
+
+/*
+ * The close routine. Since this is client initiated, we do nothing
+ */
+
+static void bc_close(struct rpc_xprt *xprt)
+{
+       /* Intentionally empty: the client owns this connection. */
+}
+
+/*
+ * The xprt destroy routine. Again, because this connection is client
+ * initiated, we do nothing
+ */
+
+static void bc_destroy(struct rpc_xprt *xprt)
+{
+       /* Intentionally empty: teardown is driven from the client side. */
+}
+
 static struct rpc_xprt_ops xs_udp_ops = {
        .set_buffer_size        = xs_udp_set_buffer_size,
        .reserve_xprt           = xprt_reserve_xprt_cong,
@@ -2134,6 +2264,22 @@ static struct rpc_xprt_ops xs_tcp_ops = {
        .print_stats            = xs_tcp_print_stats,
 };
 
+/*
+ * The rpc_xprt_ops for the server backchannel
+ */
+
+/*
+ * Note there is no .connect/.set_port here: the backchannel rides on a
+ * connection the client already established, so close/destroy are no-ops
+ * and the generic reserve/release helpers suffice.
+ */
+static struct rpc_xprt_ops bc_tcp_ops = {
+       .reserve_xprt           = xprt_reserve_xprt,
+       .release_xprt           = xprt_release_xprt,
+       .buf_alloc              = bc_malloc,    /* page-backed, for svc send path */
+       .buf_free               = bc_free,
+       .send_request           = bc_send_request,
+       .set_retrans_timeout    = xprt_set_retrans_timeout_def,
+       .close                  = bc_close,     /* no-op: client-owned connection */
+       .destroy                = bc_destroy,   /* no-op: client-owned connection */
+       .print_stats            = xs_tcp_print_stats,
+};
+
 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
                                      unsigned int slot_table_size)
 {