--- a/include/linux/swap.h
+++ b/include/linux/swap.h
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
-extern struct page *read_swap_cache_async(swp_entry_t,
+extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr);
-extern struct page *swapin_readahead(swp_entry_t,
+extern struct page *swapin_readahead(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr);
/* linux/mm/swapfile.c */
{
}
-static inline struct page *swapin_readahead(swp_entry_t swp,
+static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr)
{
return NULL;
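
The interface change is mechanical: every caller of read_swap_cache_async() and swapin_readahead() must now say which allocation class it wants. Callers that want the old behaviour just pass the mask that used to be hard-coded, as do_swap_page() does below; purely for illustration, that is equivalent to a wrapper like this (the wrapper and its name are made up, not part of the patch):

/* Illustrative only, not in the patch: the pre-patch behaviour is
 * just the new interface with the previously hard-coded mask. */
static inline struct page *swapin_readahead_default(swp_entry_t entry,
			struct vm_area_struct *vma, unsigned long addr)
{
	return swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma, addr);
}
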
--- a/mm/memory.c
+++ b/mm/memory.c
page = lookup_swap_cache(entry);
if (!page) {
grab_swap_token(); /* Contend for token _before_ read-in */
- page = swapin_readahead(entry, vma, address);
+ page = swapin_readahead(entry,
+ GFP_HIGHUSER_MOVABLE, vma, address);
if (!page) {
/*
* Back out if somebody else faulted in this pte
return err;
}
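
do_swap_page() keeps its old behaviour by passing GFP_HIGHUSER_MOVABLE explicitly. The reason a gfp_t needs threading through at all is that whoever holds a tmpfs file open may have narrowed its mapping's mask: the loop driver, notably, clears __GFP_IO and __GFP_FS on the backing file so that allocations made while servicing loop I/O cannot recurse into the filesystem and deadlock. Roughly, as a sketch of the existing loop_set_fd() logic (not part of this patch):

/* Sketch: binding a file to a loop device narrows the mapping's
 * mask so swap-in for that file may not re-enter I/O or the fs. */
gfp_t old_mask = mapping_gfp_mask(file->f_mapping);
mapping_set_gfp_mask(file->f_mapping,
			old_mask & ~(__GFP_IO | __GFP_FS));
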
--- a/mm/shmem.c
+++ b/mm/shmem.c
-static struct page *shmem_swapin(struct shmem_inode_info *info,
- swp_entry_t entry, unsigned long idx)
+static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
struct vm_area_struct pvma;
struct page *page;
pvma.vm_pgoff = idx;
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
- page = swapin_readahead(entry, &pvma, 0);
+ page = swapin_readahead(entry, gfp, &pvma, 0);
mpol_free(pvma.vm_policy);
return page;
}
-static struct page *shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
- unsigned long idx)
+static struct page *shmem_alloc_page(gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
struct vm_area_struct pvma;
struct page *page;
return 1;
}
-static inline struct page *
-shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
+static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
- return swapin_readahead(entry, NULL, 0);
+ return swapin_readahead(entry, gfp, NULL, 0);
}
-static inline struct page *
-shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
+static inline struct page *shmem_alloc_page(gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
return alloc_page(gfp);
}
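
Note that the !CONFIG_SHMEM stub passes a NULL vma. That is safe: alloc_page_vma(), which read_swap_cache_async() uses below, falls back to the calling task's mempolicy (or the system default) when given no vma, so the only thing lost is the shared-memory NUMA policy that the real shmem_swapin() carries in its on-stack pvma. In effect the allocation inside read_swap_cache_async() degenerates to:

/* What the stub's call amounts to inside read_swap_cache_async():
 * no vma, so the task/default mempolicy picks the node. */
new_page = alloc_page_vma(gfp, NULL, 0);
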
struct page *swappage;
swp_entry_t *entry;
swp_entry_t swap;
+ gfp_t gfp;
int error;
if (idx >= SHMEM_MAX_INDEX)
error = 0;
if (sgp == SGP_QUICK)
goto failed;
+ gfp = mapping_gfp_mask(mapping);
spin_lock(&info->lock);
shmem_recalc_inode(inode);
*type |= VM_FAULT_MAJOR;
}
spin_unlock(&info->lock);
- swappage = shmem_swapin(info, swap, idx);
+ swappage = shmem_swapin(swap, gfp, info, idx);
if (!swappage) {
spin_lock(&info->lock);
entry = shmem_swp_alloc(info, idx, sgp);
if (!filepage) {
spin_unlock(&info->lock);
- filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
- info,
- idx);
+ filepage = shmem_alloc_page(gfp, info, idx);
if (!filepage) {
shmem_unacct_blocks(info->flags, 1);
shmem_free_blocks(inode, 1);
return error;
}
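
shmem_getpage() now samples mapping_gfp_mask(mapping) once, up front, and uses the same mask for both the swap-in path and shmem_alloc_page(); previously only the latter honoured it. Unless a holder has narrowed the mask, it should still be GFP_HIGHUSER_MOVABLE, believed to be the default installed when an inode's mapping is initialised in this era, so plain tmpfs allocates exactly as before:

/* Believed default from fs/inode.c:alloc_inode() at this point in
 * time; only a holder such as the loop driver makes
 * mapping_gfp_mask() stricter than this. */
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
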
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
-static int add_to_swap_cache(struct page *page, swp_entry_t entry)
+static int add_to_swap_cache(struct page *page, swp_entry_t entry,
+ gfp_t gfp_mask)
{
int error;
return -ENOENT;
}
SetPageLocked(page);
- error = __add_to_swap_cache(page, entry, GFP_KERNEL);
+ error = __add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL);
/*
* Anon pages are already on the LRU, we don't run lru_cache_add here.
*/
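
One subtlety in add_to_swap_cache() above: the caller's mask describes the data page and may well include __GFP_HIGHMEM and __GFP_MOVABLE, but __add_to_swap_cache() hands its mask to the radix tree, whose nodes must be neither highmem nor movable. Masking with GFP_KERNEL strips the zone modifiers while preserving any restriction the caller imposed:

/* With the 2.6.24-era definitions
 *   GFP_KERNEL           == __GFP_WAIT | __GFP_IO | __GFP_FS
 *   GFP_HIGHUSER_MOVABLE == GFP_KERNEL | __GFP_HARDWALL
 *                                      | __GFP_HIGHMEM | __GFP_MOVABLE
 * the masking gives
 *   GFP_HIGHUSER_MOVABLE & GFP_KERNEL                == GFP_KERNEL
 *   (GFP_KERNEL & ~(__GFP_IO|__GFP_FS)) & GFP_KERNEL == __GFP_WAIT
 * so a loop-restricted mask stays restricted, and highmem/movable
 * bits never reach the radix tree. */
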
* A failure return means that either the page allocation failed or that
* the swap entry is no longer in use.
*/
-struct page *read_swap_cache_async(swp_entry_t entry,
+struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr)
{
struct page *found_page, *new_page = NULL;
* Get a new page to read into from swap.
*/
if (!new_page) {
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
- vma, addr);
+ new_page = alloc_page_vma(gfp_mask, vma, addr);
if (!new_page)
break; /* Out of memory */
}
* the just freed swap entry for an existing page.
* May fail (-ENOMEM) if radix-tree node allocation failed.
*/
- err = add_to_swap_cache(new_page, entry);
+ err = add_to_swap_cache(new_page, entry, gfp_mask);
if (!err) {
/*
* Initiate read into locked page and return.
*
* Caller must hold down_read on the vma->vm_mm if vma is not NULL.
*/
-struct page *swapin_readahead(swp_entry_t entry,
+struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr)
{
int nr_pages;
for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
/* Ok, do the async read-ahead now */
page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
- vma, addr);
+ gfp_mask, vma, addr);
if (!page)
break;
page_cache_release(page);
}
lru_add_drain(); /* Push any new pages onto the LRU now */
- return read_swap_cache_async(entry, vma, addr);
+ return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
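
The loop above drops its reference on every speculative page straight away; the swap cache keeps those pages alive while their reads complete asynchronously. Only the final read_swap_cache_async() returns a referenced page for the caller to use. A minimal caller, simplified from the fault path (hypothetical error handling; the real do_swap_page() rechecks the pte before deciding it is out of memory):

/* Hypothetical, simplified caller: readahead around 'entry', then
 * use the referenced target page.  NULL means the allocation failed
 * or the entry is no longer in use. */
page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma, address);
if (!page)
	return VM_FAULT_OOM;
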
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
*/
swap_map = &si->swap_map[i];
entry = swp_entry(type, i);
- page = read_swap_cache_async(entry, NULL, 0);
+ page = read_swap_cache_async(entry,
+ GFP_HIGHUSER_MOVABLE, NULL, 0);
if (!page) {
/*
* Either swap_duplicate() failed because entry