extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
#endif
+struct nfs_pgio_completion_ops;
/* read.c */
-extern void nfs_async_read_error(struct list_head *head);
extern struct nfs_read_header *nfs_readhdr_alloc(void);
extern void nfs_readhdr_free(struct nfs_pgio_header *hdr);
-extern void nfs_read_completion(struct nfs_pgio_header *hdr);
extern struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
unsigned int pagecount);
extern int nfs_initiate_read(struct rpc_clnt *clnt,
extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr);
extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode);
+ struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_readdata_release(struct nfs_read_data *rdata);
/* write.c */
-extern void nfs_async_write_error(struct list_head *head);
extern struct nfs_write_header *nfs_writehdr_alloc(void);
extern void nfs_writehdr_free(struct nfs_pgio_header *hdr);
extern struct nfs_write_data *nfs_writedata_alloc(struct nfs_pgio_header *hdr,
unsigned int pagecount);
-extern void nfs_write_completion(struct nfs_pgio_header *hdr);
extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr);
extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode, int ioflags);
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_writedata_release(struct nfs_write_data *wdata);
extern void nfs_commit_free(struct nfs_commit_data *p);
hdr->io_start = req_offset(hdr->req);
hdr->good_bytes = desc->pg_count;
hdr->release = release;
+ hdr->completion_ops = desc->pg_completion_ops;
}
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
const struct nfs_pageio_ops *pg_ops,
+ const struct nfs_pgio_completion_ops *compl_ops,
size_t bsize,
int io_flags)
{
desc->pg_recoalesce = 0;
desc->pg_inode = inode;
desc->pg_ops = pg_ops;
+ desc->pg_completion_ops = compl_ops;
desc->pg_ioflags = io_flags;
desc->pg_error = 0;
desc->pg_lseg = NULL;
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
bool
-pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
if (ld == NULL)
return false;
- nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
+ nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops,
+ server->rsize, 0);
return true;
}
bool
-pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
+pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+ int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
if (ld == NULL)
return false;
- nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags);
+ nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops,
+ server->wsize, ioflags);
return true;
}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
-static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head)
+static int pnfs_write_done_resend_to_mds(struct inode *inode,
+ struct list_head *head,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_pageio_descriptor pgio;
LIST_HEAD(failed);
/* Resend all requests through the MDS */
- nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE);
+ nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE, compl_ops);
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
}
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
- &hdr->pages);
+ &hdr->pages,
+ hdr->completion_ops);
}
/*
whdr = nfs_writehdr_alloc();
if (!whdr) {
- nfs_async_write_error(&desc->pg_list);
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
return -ENOMEM;
} else
pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
if (atomic_dec_and_test(&hdr->refcnt))
- nfs_write_completion(hdr);
+ hdr->completion_ops->completion(hdr);
return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
-static int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head)
+static int pnfs_read_done_resend_to_mds(struct inode *inode,
+ struct list_head *head,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_pageio_descriptor pgio;
LIST_HEAD(failed);
/* Resend all requests through the MDS */
- nfs_pageio_init_read_mds(&pgio, inode);
+ nfs_pageio_init_read_mds(&pgio, inode, compl_ops);
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
}
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
- &hdr->pages);
+ &hdr->pages,
+ hdr->completion_ops);
}
/*
rhdr = nfs_readhdr_alloc();
if (!rhdr) {
- nfs_async_read_error(&desc->pg_list);
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
ret = -ENOMEM;
put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
} else
pnfs_do_multiple_reads(desc, &hdr->rpc_list);
if (atomic_dec_and_test(&hdr->refcnt))
- nfs_read_completion(hdr);
+ hdr->completion_ops->completion(hdr);
return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
void get_layout_hdr(struct pnfs_layout_hdr *lo);
void put_lseg(struct pnfs_layout_segment *lseg);
-bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *);
-bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *, int);
+bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *,
+ const struct nfs_pgio_completion_ops *);
+bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *,
+ int, const struct nfs_pgio_completion_ops *);
void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32);
void unset_pnfs_layoutdriver(struct nfs_server *);
static const struct nfs_pageio_ops nfs_pageio_read_ops;
static const struct rpc_call_ops nfs_read_common_ops;
+static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static struct kmem_cache *nfs_rdata_cachep;
else
rdata->header = NULL;
if (atomic_dec_and_test(&hdr->refcnt))
- nfs_read_completion(hdr);
+ hdr->completion_ops->completion(hdr);
}
static
}
void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode)
+ struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
+ nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, compl_ops,
NFS_SERVER(inode)->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
- struct inode *inode)
+ struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- if (!pnfs_pageio_init_read(pgio, inode))
- nfs_pageio_init_read_mds(pgio, inode);
+ if (!pnfs_pageio_init_read(pgio, inode, compl_ops))
+ nfs_pageio_init_read_mds(pgio, inode, compl_ops);
}
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
if (len < PAGE_CACHE_SIZE)
zero_user_segment(page, len, PAGE_CACHE_SIZE);
- nfs_pageio_init_read(&pgio, inode);
+ nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
nfs_pageio_add_request(&pgio, new);
nfs_pageio_complete(&pgio);
return 0;
}
/* Note io was page aligned */
-void nfs_read_completion(struct nfs_pgio_header *hdr)
+static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
unsigned long bytes = 0;
return ret;
}
-void
+static void
nfs_async_read_error(struct list_head *head)
{
struct nfs_page *req;
}
}
+static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
+ .error_cleanup = nfs_async_read_error,
+ .completion = nfs_read_completion,
+};
+
/*
* Generate multiple requests to fill a single page.
*
list_del(&data->list);
nfs_readdata_release(data);
}
- nfs_async_read_error(&hdr->pages);
+ desc->pg_completion_ops->error_cleanup(&hdr->pages);
return -ENOMEM;
}
data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base,
desc->pg_count));
if (!data) {
- nfs_async_read_error(head);
+ desc->pg_completion_ops->error_cleanup(head);
ret = -ENOMEM;
goto out;
}
rhdr = nfs_readhdr_alloc();
if (!rhdr) {
- nfs_async_read_error(&desc->pg_list);
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
return -ENOMEM;
}
hdr = &rhdr->header;
else
set_bit(NFS_IOHDR_REDO, &hdr->flags);
if (atomic_dec_and_test(&hdr->refcnt))
- nfs_read_completion(hdr);
+ hdr->completion_ops->completion(hdr);
return ret;
}
if (ret == 0)
goto read_complete; /* all pages were read */
- nfs_pageio_init_read(&pgio, inode);
+ nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
* Local function declarations
*/
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
- struct inode *inode, int ioflags);
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops);
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_write_common_ops;
static const struct rpc_call_ops nfs_commit_ops;
+static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
else
wdata->header = NULL;
if (atomic_dec_and_test(&hdr->refcnt))
- nfs_write_completion(hdr);
+ hdr->completion_ops->completion(hdr);
}
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
struct nfs_pageio_descriptor pgio;
int err;
- nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
+ nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
+ &nfs_async_write_completion_ops);
err = nfs_do_writepage(page, wbc, &pgio);
nfs_pageio_complete(&pgio);
if (err < 0)
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
- nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
+ nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
+ &nfs_async_write_completion_ops);
err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
nfs_pageio_complete(&pgio);
#endif
-void nfs_write_completion(struct nfs_pgio_header *hdr)
+static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
unsigned long bytes = 0;
nfs_end_page_writeback(page);
}
-void nfs_async_write_error(struct list_head *head)
+static void nfs_async_write_error(struct list_head *head)
{
struct nfs_page *req;
}
}
+static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
+ .error_cleanup = nfs_async_write_error,
+ .completion = nfs_write_completion,
+};
+
/*
* Generate multiple small requests to write out a single
* contiguous dirty area on one page.
list_del(&data->list);
nfs_writedata_release(data);
}
- nfs_async_write_error(&hdr->pages);
+ desc->pg_completion_ops->error_cleanup(&hdr->pages);
return -ENOMEM;
}
data = nfs_writedata_alloc(hdr, nfs_page_array_len(desc->pg_base,
desc->pg_count));
if (!data) {
- nfs_async_write_error(head);
+ desc->pg_completion_ops->error_cleanup(head);
ret = -ENOMEM;
goto out;
}
whdr = nfs_writehdr_alloc();
if (!whdr) {
- nfs_async_write_error(&desc->pg_list);
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
return -ENOMEM;
}
hdr = &whdr->header;
else
set_bit(NFS_IOHDR_REDO, &hdr->flags);
if (atomic_dec_and_test(&hdr->refcnt))
- nfs_write_completion(hdr);
+ hdr->completion_ops->completion(hdr);
return ret;
}
};
void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode, int ioflags)
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
+ nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops, compl_ops,
NFS_SERVER(inode)->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
- struct inode *inode, int ioflags)
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- if (!pnfs_pageio_init_write(pgio, inode, ioflags))
- nfs_pageio_init_write_mds(pgio, inode, ioflags);
+ if (!pnfs_pageio_init_write(pgio, inode, ioflags, compl_ops))
+ nfs_pageio_init_write_mds(pgio, inode, ioflags, compl_ops);
}
void nfs_write_prepare(struct rpc_task *task, void *calldata)
int pg_ioflags;
int pg_error;
const struct rpc_call_ops *pg_rpc_callops;
+ const struct nfs_pgio_completion_ops *pg_completion_ops;
struct pnfs_layout_segment *pg_lseg;
};
extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
const struct nfs_pageio_ops *pg_ops,
+ const struct nfs_pgio_completion_ops *compl_ops,
size_t bsize,
int how);
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
loff_t io_start;
const struct rpc_call_ops *mds_ops;
void (*release) (struct nfs_pgio_header *hdr);
+ const struct nfs_pgio_completion_ops *completion_ops;
spinlock_t lock;
/* fields protected by lock */
int pnfs_error;
int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
};
+struct nfs_pgio_completion_ops {
+ void (*error_cleanup)(struct list_head *head);
+ void (*completion)(struct nfs_pgio_header *hdr);
+};
+
struct nfs_unlinkdata {
struct hlist_node list;
struct nfs_removeargs args;
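
For reference, a minimal sketch (not part of the patch) of how a caller is expected to supply the new ops table. The example_* names below are hypothetical and simply mirror the nfs_async_read_completion_ops table this series adds in fs/nfs/read.c; the ops pointer is stashed in desc->pg_completion_ops by nfs_pageio_init() and copied into hdr->completion_ops by nfs_pgheader_init(), so both the error and the completion paths can reach it later.

	/* Hypothetical caller-side completion ops, mirroring read.c */
	static void example_error_cleanup(struct list_head *head)
	{
		/* unlock and release every nfs_page still queued on the list */
	}

	static void example_completion(struct nfs_pgio_header *hdr)
	{
		/* per-page completion accounting, then free the header */
	}

	static const struct nfs_pgio_completion_ops example_completion_ops = {
		.error_cleanup	= example_error_cleanup,
		.completion	= example_completion,
	};

	/* handed down through the new compl_ops argument when I/O is set up */
	nfs_pageio_init_read_mds(&pgio, inode, &example_completion_ops);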