#include "internal.h"
+/*
+ * Retry bounds for the mutex_trylock() loop on mapping->i_mmap_mutex in
+ * try_to_unmap_file(): call cond_resched() every MUTEX_RETRY_RESCHED
+ * failed attempts, and give up (SWAP_FAIL) after MUTEX_RETRY_COUNT.
+ */
+#define MUTEX_RETRY_COUNT (65536)
+#define MUTEX_RETRY_RESCHED (1024)
+
+
+
static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;
* LOCK should suffice since the actual taking of the lock must
* happen _before_ what follows.
*/
- if (rwsem_is_locked(&anon_vma->root->rwsem)) {
+ might_sleep();
+ if (anon_vma->root && rwsem_is_locked(&anon_vma->root->rwsem)) {
anon_vma_lock_write(anon_vma);
anon_vma_unlock_write(anon_vma);
}
* above cannot corrupt).
*/
if (!page_mapped(page)) {
+ rcu_read_unlock();
put_anon_vma(anon_vma);
- anon_vma = NULL;
+ return NULL;
}
out:
rcu_read_unlock();
}
if (!page_mapped(page)) {
+ rcu_read_unlock();
put_anon_vma(anon_vma);
- anon_vma = NULL;
- goto out;
+ return NULL;
}
/* we pinned the anon_vma, its safe to sleep */
spinlock_t *ptl;
if (unlikely(PageHuge(page))) {
+ /* huge_pte_offset() returns NULL when the PUD is not present */
pte = huge_pte_offset(mm, address);
+ if (!pte)
+ return NULL;
+
ptl = &mm->page_table_lock;
goto check;
}
*/
BUG_ON(!PageLocked(page));
- mutex_lock(&mapping->i_mmap_mutex);
+ /* Use trylock to avoid a deadlock on i_mmap_mutex */
+ if (!mutex_trylock(&mapping->i_mmap_mutex))
+ return 1; /* keep the page in the active list */
/*
* i_mmap_mutex does not stabilize mapcount at all, but mapcount
BUG_ON(!page || PageAnon(page));
if (locked_vma) {
- mlock_vma_page(page); /* no-op if already mlocked */
- if (page == check_page)
+ if (page == check_page) {
+ /* we know we have check_page locked */
+ mlock_vma_page(page);
ret = SWAP_MLOCK;
+ } else if (trylock_page(page)) {
+ /*
+ * If we can lock the page, perform mlock.
+ * Otherwise leave the page alone, it will be
+ * eventually encountered again later.
+ */
+ mlock_vma_page(page);
+ unlock_page(page);
+ }
continue; /* don't unmap */
}
unsigned long max_nl_cursor = 0;
unsigned long max_nl_size = 0;
unsigned int mapcount;
+ int retry = 0;
+
if (PageHuge(page))
pgoff = page->index << compound_order(page);
- mutex_lock(&mapping->i_mmap_mutex);
+ while (!mutex_trylock(&mapping->i_mmap_mutex)) {
+ retry++;
+ if (!(retry % MUTEX_RETRY_RESCHED))
+ cond_resched();
+ if (retry > MUTEX_RETRY_COUNT) {
+ printk(KERN_ERR ">> failed to lock i_mmap_mutex in try_to_unmap_file <<\n");
+ return SWAP_FAIL;
+ }
+ }
+
+
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);
ret = try_to_unmap_one(page, vma, address, flags);
{
struct anon_vma *root = anon_vma->root;
- if (root != anon_vma && atomic_dec_and_test(&root->refcount))
- anon_vma_free(root);
-
anon_vma_free(anon_vma);
+ /*
+ * Drop the root reference only after anon_vma itself has been freed,
+ * presumably because anon_vma_free() may still look at anon_vma->root
+ * (freeing the root first would be a use-after-free) — confirm against
+ * anon_vma_free().  NOTE(review): the added "root &&" guard assumes
+ * root can be NULL here; verify that assumption.
+ */
+ if (root && root != anon_vma && atomic_dec_and_test(&root->refcount))
+ anon_vma_free(root);
}
#ifdef CONFIG_MIGRATION