*
* Freeing policy:
* Mempolicy objects are reference counted. A mempolicy will be freed when
- * mpol_free() decrements the reference count to zero.
+ * mpol_put() decrements the reference count to zero.
*
* Copying policy objects:
* mpol_copy() allocates a new mempolicy and copies the specified mempolicy
* The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
*/
-extern void __mpol_free(struct mempolicy *pol);
-static inline void mpol_free(struct mempolicy *pol)
+extern void __mpol_put(struct mempolicy *pol);
+static inline void mpol_put(struct mempolicy *pol)
{
if (pol)
- __mpol_free(pol);
+ __mpol_put(pol);
}
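
As a rough sketch of the reference-counting contract described in the header
comment (illustrative only, not part of the patch; 'task_pol' stands in for a
hypothetical struct mempolicy * field), a holder that swaps in a new policy
pairs mpol_get() on the new object with mpol_put() on the old one, exactly as
policy_vma() and do_set_mempolicy() do below:

	struct mempolicy *old;

	mpol_get(new);		/* the new holder takes a reference           */
	old = task_pol;		/* hypothetical stored pointer                */
	task_pol = new;
	mpol_put(old);		/* old policy is freed once its count is zero */
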
extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
return 1;
}
-static inline void mpol_free(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *p)
{
}
if (!err) {
mpol_get(new);
vma->vm_policy = new;
- mpol_free(old);
+ mpol_put(old);
}
return err;
}
new = mpol_new(mode, flags, nodes);
if (IS_ERR(new))
return PTR_ERR(new);
- mpol_free(current->mempolicy);
+ mpol_put(current->mempolicy);
current->mempolicy = new;
mpol_set_task_struct_flag();
if (new && new->policy == MPOL_INTERLEAVE &&
}
up_write(&mm->mmap_sem);
- mpol_free(new);
+ mpol_put(new);
return err;
}
nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
if (unlikely(pol != &default_policy &&
pol != current->mempolicy))
- __mpol_free(pol); /* finished with pol */
+ __mpol_put(pol); /* finished with pol */
return node_zonelist(nid, gfp_flags);
}
zl = zonelist_policy(GFP_HIGHUSER, pol);
if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
if (pol->policy != MPOL_BIND)
- __mpol_free(pol); /* finished with pol */
+ __mpol_put(pol); /* finished with pol */
else
*mpol = pol; /* unref needed after allocation */
}
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
if (unlikely(pol != &default_policy &&
pol != current->mempolicy))
- __mpol_free(pol); /* finished with pol */
+ __mpol_put(pol); /* finished with pol */
return alloc_page_interleave(gfp, 0, nid);
}
zl = zonelist_policy(gfp, pol);
*/
struct page *page = __alloc_pages_nodemask(gfp, 0,
zl, nodemask_policy(gfp, pol));
- __mpol_free(pol);
+ __mpol_put(pol);
return page;
}
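
The conditional __mpol_put() calls above all follow the same pattern: a policy
looked up for a VMA may turn out to be the static default_policy or the calling
task's own policy, neither of which took an extra reference, so only the
remaining (shared or per-VMA) policies are dropped. A condensed sketch of that
pattern, reusing symbols from mm/mempolicy.c for illustration only:

	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	/* ... use pol to choose a node or zonelist ... */

	if (pol != &default_policy && pol != current->mempolicy)
		__mpol_put(pol);	/* drop only extra-referenced policies */
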
/*
}
/* Slow path of a mpol destructor. */
-void __mpol_free(struct mempolicy *p)
+void __mpol_put(struct mempolicy *p)
{
if (!atomic_dec_and_test(&p->refcnt))
return;
{
pr_debug("deleting %lx-l%lx\n", n->start, n->end);
rb_erase(&n->nd, &sp->root);
- mpol_free(n->policy);
+ mpol_put(n->policy);
kmem_cache_free(sn_cache, n);
}
sp_insert(sp, new);
spin_unlock(&sp->lock);
if (new2) {
- mpol_free(new2->policy);
+ mpol_put(new2->policy);
kmem_cache_free(sn_cache, new2);
}
return 0;
/* Policy covers entire file */
pvma.vm_end = TASK_SIZE;
mpol_set_shared_policy(info, &pvma, newpol);
- mpol_free(newpol);
+ mpol_put(newpol);
}
}
}
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
rb_erase(&n->nd, &p->root);
- mpol_free(n->policy);
+ mpol_put(n->policy);
kmem_cache_free(sn_cache, n);
}
spin_unlock(&p->lock);
* unref shared or other task's mempolicy
*/
if (pol != &default_policy && pol != current->mempolicy)
- __mpol_free(pol);
+ __mpol_put(pol);
seq_printf(m, "%08lx %s", vma->vm_start, buffer);
vma->vm_ops->close(vma);
if (vma->vm_file)
fput(vma->vm_file);
- mpol_free(vma_policy(vma));
+ mpol_put(vma_policy(vma));
kmem_cache_free(vm_area_cachep, vma);
return next;
}
if (file)
fput(file);
mm->map_count--;
- mpol_free(vma_policy(next));
+ mpol_put(vma_policy(next));
kmem_cache_free(vm_area_cachep, next);
/*
* In mprotect's case 6 (see comments on vma_merge),
if (file && vma_merge(mm, prev, addr, vma->vm_end,
vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
- mpol_free(vma_policy(vma));
+ mpol_put(vma_policy(vma));
kmem_cache_free(vm_area_cachep, vma);
fput(file);
} else {
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
page = swapin_readahead(entry, gfp, &pvma, 0);
- mpol_free(pvma.vm_policy);
+ mpol_put(pvma.vm_policy);
return page;
}
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
page = alloc_page_vma(gfp, &pvma, 0);
- mpol_free(pvma.vm_policy);
+ mpol_put(pvma.vm_policy);
return page;
}
#else /* !CONFIG_NUMA */