	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}
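
+/*
+ * Return the nfs_page request attached to @page via its private pointer
+ * (PG_private), or NULL if the page carries no request.
+ */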
+static struct nfs_page *
+nfs_page_private_request(struct page *page)
+{
+	if (!PagePrivate(page))
+		return NULL;
+	return (struct nfs_page *)page_private(page);
+}
+
/*
 * nfs_page_find_head_request_locked - find head request associated with @page
 *
 * must be called while holding the inode lock.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *
nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
{
-	struct nfs_page *req = NULL;
+	struct nfs_page *req;

-	if (PagePrivate(page))
-		req = (struct nfs_page *)page_private(page);
-	else if (unlikely(PageSwapCache(page)))
+	req = nfs_page_private_request(page);
+	if (!req && unlikely(PageSwapCache(page)))
		req = nfs_page_search_commits_for_head_request_locked(nfsi,
			page);
	int ret;

try_again:
-	if (!(PagePrivate(page) || PageSwapCache(page)))
-		return NULL;
-	spin_lock(&inode->i_lock);
	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
-	head = nfs_page_find_head_request_locked(NFS_I(inode), page);
-
-	if (!head) {
-		spin_unlock(&inode->i_lock);
+	head = nfs_page_find_head_request(page);
+	if (!head)
		return NULL;
-	}

	/* lock the page head first in order to avoid an ABBA inefficiency */
	if (!nfs_lock_request(head)) {
-		spin_unlock(&inode->i_lock);
		ret = nfs_wait_on_request(head);
		nfs_release_request(head);
		if (ret < 0)
			return ERR_PTR(ret);
		goto try_again;
	}
-	spin_unlock(&inode->i_lock);
+
+	/*
+	 * Ensure that nobody removed the request before we locked it:
+	 * the head was looked up before the request lock was taken, so
+	 * it may no longer be the request attached to the page.  If so,
+	 * drop the lock and reference and retry the lookup.
+	 */
+	if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
+		nfs_unlock_and_release_request(head);
+		goto try_again;
+	}

	ret = nfs_page_group_lock(head);
	if (ret < 0) {
		nfs_unlock_and_release_request(head);
		return ERR_PTR(ret);
	}
-	/* still holds ref on head from nfs_page_find_head_request_locked
+	/* still holds ref on head from nfs_page_find_head_request
	 * and still has lock on head from lock loop */
	return head;
}
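
The pattern this hunk moves to - look the request up without the outer lock, lock it, then re-check that it is still the one attached to the page before trusting it - is easier to see outside the kernel. Below is a minimal userspace sketch of that retry loop; struct page_like, page_private_request(), lock_and_validate() and detach_request() are invented stand-ins, and the reference counting that keeps the real nfs_page alive across the unlocked window is deliberately omitted, so this is illustrative only, not the NFS code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct request {
	pthread_mutex_t lock;
	int id;
};

struct page_like {
	_Atomic(struct request *) private;	/* stands in for page_private(page) */
};

/* Lockless peek at the attached request (compare nfs_page_private_request()). */
static struct request *page_private_request(struct page_like *p)
{
	return atomic_load_explicit(&p->private, memory_order_acquire);
}

/* Detach the request from the page, as a teardown path would. */
static void detach_request(struct page_like *p)
{
	atomic_store_explicit(&p->private, (struct request *)NULL,
			      memory_order_release);
}

/*
 * Look up and lock the request attached to @p.  The lookup runs without
 * any outer lock, so once the request lock is held we must confirm that
 * the request is still the one attached to the page; if it was detached
 * in the meantime, drop the lock and retry.
 */
static struct request *lock_and_validate(struct page_like *p)
{
	struct request *req;

retry:
	req = page_private_request(p);
	if (!req)
		return NULL;
	pthread_mutex_lock(&req->lock);
	if (req != page_private_request(p)) {
		pthread_mutex_unlock(&req->lock);	/* lost a race; retry */
		goto retry;
	}
	return req;	/* returned with req->lock held */
}

int main(void)
{
	struct request r = { .lock = PTHREAD_MUTEX_INITIALIZER, .id = 1 };
	struct page_like p;
	struct request *req;

	atomic_init(&p.private, &r);

	req = lock_and_validate(&p);
	if (req) {
		printf("locked request %d\n", req->id);
		pthread_mutex_unlock(&req->lock);
	}

	detach_request(&p);
	printf("after detach: %s\n", lock_and_validate(&p) ? "found" : "no request");
	return 0;
}

Built with a normal toolchain (e.g. cc -pthread sketch.c), the program locks the attached request and then shows that after detach_request() the lookup cleanly returns NULL instead of a stale pointer; nfs_lock_and_join_requests() above follows the same shape, with nfs_unlock_and_release_request() plus goto try_again playing the unlock-and-retry role.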