struct bio *bio;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
- unsigned long flags = current->flags;
unsigned int i;
/*
- * Tell VM to act less aggressively and fail earlier.
- * This is not necessary but increases throughput.
+ * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
+ * to fail earlier. This is not necessary but increases throughput.
* FIXME: Is this really intelligent?
*/
- current->flags &= ~PF_MEMALLOC;
-
if (base_bio)
- bio = bio_clone(base_bio, GFP_NOIO);
+ bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
else
- bio = bio_alloc(GFP_NOIO, nr_iovecs);
- if (!bio) {
- if (flags & PF_MEMALLOC)
- current->flags |= PF_MEMALLOC;
+ bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
+ if (!bio)
return NULL;
- }
/* if the last bio was not complete, continue where that one ended */
bio->bi_idx = *bio_vec_idx;
size -= bv->bv_len;
}
- if (flags & PF_MEMALLOC)
- current->flags |= PF_MEMALLOC;
-
if (!bio->bi_size) {
bio_put(bio);
return NULL;
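
The dm-crypt hunks above amount to the following before/after, pulled out as a minimal sketch rather than the full crypt_alloc_buffer() (only the bio allocation and the PF_MEMALLOC handling around it are reproduced from the diff):

	/* Before: clear PF_MEMALLOC for the duration of the allocation and
	 * remember to restore it on every exit path, including the error one. */
	unsigned long flags = current->flags;

	current->flags &= ~PF_MEMALLOC;
	bio = bio_alloc(GFP_NOIO, nr_iovecs);
	if (flags & PF_MEMALLOC)
		current->flags |= PF_MEMALLOC;

	/* After: carry the same intent in the gfp mask, which bio_alloc() and
	 * bio_clone() pass down to their underlying allocations, so no
	 * task-flag juggling is needed. */
	bio = bio_alloc(GFP_NOIO | __GFP_NOMEMALLOC, nr_iovecs);
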
int add_to_swap(struct page * page)
{
swp_entry_t entry;
- int pf_flags;
int err;
if (!PageLocked(page))
if (!entry.val)
return 0;
- /* Radix-tree node allocations are performing
- * GFP_ATOMIC allocations under PF_MEMALLOC.
- * They can completely exhaust the page allocator.
- *
- * So PF_MEMALLOC is dropped here. This causes the slab
- * allocations to fail earlier, so radix-tree nodes will
- * then be allocated from the mempool reserves.
+ /*
+ * Radix-tree node allocations from PF_MEMALLOC contexts could
+ * completely exhaust the page allocator. __GFP_NOMEMALLOC
+ * stops emergency reserves from being allocated.
*
- * We're still using __GFP_HIGH for radix-tree node
- * allocations, so some of the emergency pools are available,
- * just not all of them.
+ * TODO: this could cause a theoretical memory reclaim
+ * deadlock in the swap out path.
*/
-
- pf_flags = current->flags;
- current->flags &= ~PF_MEMALLOC;
-
/*
* Add it to the swap cache and mark it dirty
*/
- err = __add_to_swap_cache(page, entry, GFP_ATOMIC|__GFP_NOWARN);
-
- if (pf_flags & PF_MEMALLOC)
- current->flags |= PF_MEMALLOC;
+ err = __add_to_swap_cache(page, entry,
+ GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
switch (err) {
case 0: /* Success */
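
On the swap side, the mask chosen in add_to_swap() matters because __add_to_swap_cache() forwards it to the radix-tree node allocation. The sketch below shows just that gfp plumbing, assuming the radix_tree_preload()-based scheme the swap-cache code of this era used; the real function also does the tree locking, the insert itself, and the page accounting that are omitted here:

/*
 * Illustrative only.  The mask chosen in add_to_swap() above is what
 * the radix-tree node allocation sees, so under memory pressure it
 * fails early (and quietly, thanks to __GFP_NOWARN) instead of
 * draining the page allocator's emergency reserves; the nodes are
 * then taken from the reserves the removed comment above refers to.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       int gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);	/* node allocation honours the mask */
	if (!error) {
		/* ... take the tree lock and insert page at entry.val ... */
		radix_tree_preload_end();
	}
	return error;
}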