CIFS: Add asynchronous context to support kernel AIO
author	Pavel Shilovsky <pshilov@microsoft.com>
Tue, 25 Apr 2017 18:52:29 +0000 (11:52 -0700)
committer	Steve French <smfrench@gmail.com>
Tue, 2 May 2017 19:57:34 +0000 (14:57 -0500)
Currently the code doesn't recognize asynchronous calls passed in
by io_submit() and processes all calls synchronously, which is not
what kernel AIO expects. This patch introduces a new async context
that keeps track of all issued i/o requests and moves the
response-collecting procedure to a separate thread. This allows us
to return to the caller immediately for async calls and to invoke
iocb->ki_complete() once all requests have completed. For sync
calls the current thread simply waits until all requests are
completed.
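
For orientation, the intended call pattern might look roughly like the
sketch below. This is a hypothetical caller, not part of this patch: the
request-issuing step is elided, and the sync/async split is inferred from
the description above.

/* Hypothetical caller sketch -- not part of this patch. */
ssize_t cifs_user_readv_sketch(struct kiocb *iocb, struct iov_iter *to)
{
	struct cifs_aio_ctx *ctx;
	ssize_t rc;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;         /* ki_complete() target for async */

	rc = setup_aio_ctx_iter(ctx, to, READ);
	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	/* ... issue read requests; each holds a reference on ctx ... */

	if (!is_sync_kiocb(iocb)) {
		/* async: collector thread calls iocb->ki_complete() later */
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	wait_for_completion(&ctx->done);  /* sync: wait for all requests */
	rc = ctx->rc;
	kref_put(&ctx->refcount, cifs_aio_ctx_release);
	return rc;
}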

Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
Signed-off-by: Steve French <smfrench@gmail.com>
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/misc.c

index 37f5a41cc50cc523cd76c790100398d4db9e80ca..bb412261d60164e2a3aade2949a7c23a3c7d1a61 100644 (file)
@@ -1115,6 +1115,22 @@ struct cifs_io_parms {
        struct cifs_tcon *tcon;
 };
 
+struct cifs_aio_ctx {
+       struct kref             refcount;
+       struct list_head        list;
+       struct mutex            aio_mutex;
+       struct completion       done;
+       struct iov_iter         iter;
+       struct kiocb            *iocb;
+       struct cifsFileInfo     *cfile;
+       struct bio_vec          *bv;
+       unsigned int            npages;
+       ssize_t                 rc;
+       unsigned int            len;
+       unsigned int            total_len;
+       bool                    should_dirty;
+};
+
 struct cifs_readdata;
 
 /* asynchronous read support */
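
The kref and the completion cooperate: each outstanding request is
expected to hold a reference on the context, and the last request to
finish signals done. A rough lifecycle sketch follows; the request
variable (rdata) and the list handling are assumptions for illustration,
not part of this hunk.

/* Illustrative lifecycle -- names other than ctx fields are assumed. */

/* When issuing each request: */
kref_get(&ctx->refcount);
list_add_tail(&rdata->list, &ctx->list);      /* rdata: hypothetical request */

/* In that request's completion path: */
mutex_lock(&ctx->aio_mutex);
list_del(&rdata->list);
if (list_empty(&ctx->list))
	complete(&ctx->done);                 /* wake waiter/collector thread */
mutex_unlock(&ctx->aio_mutex);
kref_put(&ctx->refcount, cifs_aio_ctx_release);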
index 97e5d236d26559806ca8bc278f7c248e0579ef77..e49958c3f8bbded4265b71e47a87d736a34676da 100644 (file)
@@ -535,4 +535,7 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
                        struct shash_desc *shash);
 enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
                                        enum securityEnum);
+struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
+void cifs_aio_ctx_release(struct kref *refcount);
+int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
 #endif                 /* _CIFSPROTO_H */
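
The rw argument to setup_aio_ctx_iter() is the direction flag later or'd
into the bvec iterator (READ for reads into user memory, WRITE for writes
from it). A hedged sketch of the calling convention on the write side;
the from iterator and error path are illustrative:

/* Assumed write-path call site for the prototypes above. */
struct cifs_aio_ctx *ctx = cifs_aio_ctx_alloc();
if (!ctx)
	return -ENOMEM;

rc = setup_aio_ctx_iter(ctx, from, WRITE);    /* 'from': caller's iov_iter */
if (rc) {
	kref_put(&ctx->refcount, cifs_aio_ctx_release);
	return rc;
}
/* On success ctx->iter describes the pinned pages and the caller's
 * iterator has been advanced past the ctx->len bytes that were captured;
 * drop the final reference with kref_put() when the i/o is done. */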
index 843787850435087f908c1dec15e28669687d9f2b..d8f8ddcdd57cead41f6794ba6cbd3e7061c6ff80 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/ctype.h>
 #include <linux/mempool.h>
+#include <linux/vmalloc.h>
 #include "cifspdu.h"
 #include "cifsglob.h"
 #include "cifsproto.h"
@@ -741,3 +742,122 @@ parse_DFS_referrals_exit:
        }
        return rc;
 }
+
+struct cifs_aio_ctx *
+cifs_aio_ctx_alloc(void)
+{
+       struct cifs_aio_ctx *ctx;
+
+       ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
+       if (!ctx)
+               return NULL;
+
+       INIT_LIST_HEAD(&ctx->list);
+       mutex_init(&ctx->aio_mutex);
+       init_completion(&ctx->done);
+       kref_init(&ctx->refcount);
+       return ctx;
+}
+
+void
+cifs_aio_ctx_release(struct kref *refcount)
+{
+       struct cifs_aio_ctx *ctx = container_of(refcount,
+                                       struct cifs_aio_ctx, refcount);
+
+       cifsFileInfo_put(ctx->cfile);
+       kvfree(ctx->bv);
+       kfree(ctx);
+}
+
+#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)
+
+int
+setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
+{
+       ssize_t rc;
+       unsigned int cur_npages;
+       unsigned int npages = 0;
+       unsigned int i;
+       size_t len;
+       size_t count = iov_iter_count(iter);
+       unsigned int saved_len;
+       size_t start;
+       unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
+       struct page **pages = NULL;
+       struct bio_vec *bv = NULL;
+
+       if (iter->type & ITER_KVEC) {
+               memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
+               ctx->len = count;
+               iov_iter_advance(iter, count);
+               return 0;
+       }
+
+       if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
+               bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
+                                  GFP_KERNEL);
+
+       if (!bv) {
+               bv = vmalloc(max_pages * sizeof(struct bio_vec));
+               if (!bv)
+                       return -ENOMEM;
+       }
+
+       if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
+               pages = kmalloc_array(max_pages, sizeof(struct page *),
+                                     GFP_KERNEL);
+
+       if (!pages) {
+               pages = vmalloc(max_pages * sizeof(struct page *));
+               if (!pages) {
+                       kvfree(bv);
+                       return -ENOMEM;
+               }
+       }
+
+       saved_len = count;
+
+       while (count && npages < max_pages) {
+               rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
+               if (rc < 0) {
+                       cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
+                       break;
+               }
+
+               if (rc > count) {
+                       cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
+                                count);
+                       break;
+               }
+
+               iov_iter_advance(iter, rc);
+               count -= rc;
+               rc += start;
+               cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
+
+               if (npages + cur_npages > max_pages) {
+                       cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
+                                npages + cur_npages, max_pages);
+                       break;
+               }
+
+               for (i = 0; i < cur_npages; i++) {
+                       len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
+                       bv[npages + i].bv_page = pages[i];
+                       bv[npages + i].bv_offset = start;
+                       bv[npages + i].bv_len = len - start;
+                       rc -= len;
+                       start = 0;
+               }
+
+               npages += cur_npages;
+       }
+
+       kvfree(pages);
+       ctx->bv = bv;
+       ctx->len = saved_len - count;
+       ctx->npages = npages;
+       iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
+       return 0;
+}
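
The packing loop at the end can be exercised in userspace. This
standalone demo uses toy types and an assumed PAGE_SIZE of 4096 (not the
kernel's bio_vec) to show how 10000 pinned bytes starting at page offset
300 split into bvecs: the first entry is short by the starting offset,
middle entries cover whole pages, and the last holds the remainder.

/* Userspace demo of the bvec packing math above (toy types). */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096

struct toy_bvec { unsigned int offset, len; };

int main(void)
{
	long rc = 10000;          /* bytes pinned by iov_iter_get_pages()  */
	size_t start = 300;       /* offset of the data in the first page  */
	struct toy_bvec bv[8];
	unsigned int i, cur_npages;

	rc += start;              /* as in the kernel loop: bias by offset */
	cur_npages = (rc + PAGE_SIZE - 1) / PAGE_SIZE;   /* DIV_ROUND_UP   */

	for (i = 0; i < cur_npages; i++) {
		size_t len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
		bv[i].offset = start;
		bv[i].len = len - start;  /* first page is short by 'start' */
		rc -= len;
		start = 0;                /* later pages start at offset 0  */
	}

	/* Prints: (300,3796), (0,4096), (0,2108) -- 10000 bytes total. */
	for (i = 0; i < cur_npages; i++)
		printf("bv[%u]: offset=%u len=%u\n", i, bv[i].offset, bv[i].len);
	return 0;
}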