/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
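
/*
 * Illustration (userspace view, not part of this file): the modes above
 * are selected through set_mempolicy(2) and mbind(2).  A rough sketch,
 * interleaving a task's future allocations over nodes 0 and 1 and then
 * binding an existing mapping buf/len to the same nodes, might look like:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));
 *	mbind(buf, len, MPOL_BIND, &mask, 8 * sizeof(mask), MPOL_MF_STRICT);
 *
 * buf and len are placeholders; see the man pages for the exact maxnode
 * and flag semantics.
 */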

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

static struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (!pol) {
		node = numa_node_id();
		if (node != NUMA_NO_NODE)
			pol = &preferred_node_policy[node];

		/* preferred_node_policy is not initialised early in boot */
		if (!pol->mode)
			pol = NULL;
	}

	return pol;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind task->mempolicy in two steps.
	 * The first step sets all the newly allowed nodes, and the second
	 * step clears all the disallowed nodes.  This way we avoid ending
	 * up with no node to allocate a page from.
	 * If we have a lock to protect task->mempolicy on the read side,
	 * we rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

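/*
 * Worked example of the two-step rebind described above (illustrative
 * only): a task interleaving over nodes 0-1 whose cpuset is moved to
 * nodes 2-3.  MPOL_REBIND_STEP1 remaps the old nodes onto the new mask
 * and ORs the result in, leaving the policy at {0,1,2,3} so concurrent
 * allocations always have somewhere to go; MPOL_REBIND_STEP2 then drops
 * the disallowed nodes, leaving {2,3}.  MPOL_REBIND_ONCE collapses both
 * steps when the caller knows no unlocked reader can observe the
 * intermediate state.
 */
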
/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps.  The first
 * step sets all the newly allowed nodes, and the second step clears
 * all the disallowed nodes.  This way we avoid ending up with no node
 * to allocate a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

fc301289
CL
473static void migrate_page_add(struct page *page, struct list_head *pagelist,
474 unsigned long flags);
1a75a6c8 475
38e35860 476/* Scan through pages checking if pages follow certain conditions. */
b5810039 477static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
dc9aa5b9
CL
478 unsigned long addr, unsigned long end,
479 const nodemask_t *nodes, unsigned long flags,
38e35860 480 void *private)
1da177e4 481{
91612e0d
HD
482 pte_t *orig_pte;
483 pte_t *pte;
705e87c0 484 spinlock_t *ptl;
941150a3 485
705e87c0 486 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
91612e0d 487 do {
6aab341e 488 struct page *page;
25ba77c1 489 int nid;
91612e0d
HD
490
491 if (!pte_present(*pte))
1da177e4 492 continue;
6aab341e
LT
493 page = vm_normal_page(vma, addr, *pte);
494 if (!page)
1da177e4 495 continue;
053837fc 496 /*
62b61f61
HD
497 * vm_normal_page() filters out zero pages, but there might
498 * still be PageReserved pages to skip, perhaps in a VDSO.
053837fc 499 */
b79bc0a0 500 if (PageReserved(page))
f4598c8b 501 continue;
6aab341e 502 nid = page_to_nid(page);
38e35860
CL
503 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
504 continue;
505
b1f72d18 506 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
fc301289 507 migrate_page_add(page, private, flags);
38e35860
CL
508 else
509 break;
91612e0d 510 } while (pte++, addr += PAGE_SIZE, addr != end);
705e87c0 511 pte_unmap_unlock(orig_pte, ptl);
91612e0d
HD
512 return addr != end;
513}
514
b5810039 515static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
dc9aa5b9
CL
516 unsigned long addr, unsigned long end,
517 const nodemask_t *nodes, unsigned long flags,
38e35860 518 void *private)
91612e0d
HD
519{
520 pmd_t *pmd;
521 unsigned long next;
522
523 pmd = pmd_offset(pud, addr);
524 do {
525 next = pmd_addr_end(addr, end);
e180377f 526 split_huge_page_pmd(vma, addr, pmd);
1a5a9906 527 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
91612e0d 528 continue;
dc9aa5b9 529 if (check_pte_range(vma, pmd, addr, next, nodes,
38e35860 530 flags, private))
91612e0d
HD
531 return -EIO;
532 } while (pmd++, addr = next, addr != end);
533 return 0;
534}
535
b5810039 536static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
dc9aa5b9
CL
537 unsigned long addr, unsigned long end,
538 const nodemask_t *nodes, unsigned long flags,
38e35860 539 void *private)
91612e0d
HD
540{
541 pud_t *pud;
542 unsigned long next;
543
544 pud = pud_offset(pgd, addr);
545 do {
546 next = pud_addr_end(addr, end);
547 if (pud_none_or_clear_bad(pud))
548 continue;
dc9aa5b9 549 if (check_pmd_range(vma, pud, addr, next, nodes,
38e35860 550 flags, private))
91612e0d
HD
551 return -EIO;
552 } while (pud++, addr = next, addr != end);
553 return 0;
554}
555
b5810039 556static inline int check_pgd_range(struct vm_area_struct *vma,
dc9aa5b9
CL
557 unsigned long addr, unsigned long end,
558 const nodemask_t *nodes, unsigned long flags,
38e35860 559 void *private)
91612e0d
HD
560{
561 pgd_t *pgd;
562 unsigned long next;
563
b5810039 564 pgd = pgd_offset(vma->vm_mm, addr);
91612e0d
HD
565 do {
566 next = pgd_addr_end(addr, end);
567 if (pgd_none_or_clear_bad(pgd))
568 continue;
dc9aa5b9 569 if (check_pud_range(vma, pgd, addr, next, nodes,
38e35860 570 flags, private))
91612e0d
HD
571 return -EIO;
572 } while (pgd++, addr = next, addr != end);
573 return 0;
1da177e4
LT
574}
575
b24f53a0
LS
576#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
577/*
4b10e7d5
MG
578 * This is used to mark a range of virtual addresses to be inaccessible.
579 * These are later cleared by a NUMA hinting fault. Depending on these
580 * faults, pages may be migrated for better NUMA placement.
581 *
582 * This is assuming that NUMA faults are handled using PROT_NONE. If
583 * an architecture makes a different choice, it will need further
584 * changes to the core.
b24f53a0 585 */
4b10e7d5
MG
586unsigned long change_prot_numa(struct vm_area_struct *vma,
587 unsigned long addr, unsigned long end)
b24f53a0 588{
4b10e7d5
MG
589 int nr_updated;
590 BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
b24f53a0 591
4b10e7d5 592 nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
03c5a6e1
MG
593 if (nr_updated)
594 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
b24f53a0 595
4b10e7d5 596 return nr_updated;
b24f53a0
LS
597}
598#else
599static unsigned long change_prot_numa(struct vm_area_struct *vma,
600 unsigned long addr, unsigned long end)
601{
602 return 0;
603}
604#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
605
dc9aa5b9
CL
606/*
607 * Check if all pages in a range are on a set of nodes.
608 * If pagelist != NULL then isolate pages from the LRU and
609 * put them on the pagelist.
610 */
1da177e4
LT
611static struct vm_area_struct *
612check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
38e35860 613 const nodemask_t *nodes, unsigned long flags, void *private)
1da177e4
LT
614{
615 int err;
616 struct vm_area_struct *first, *vma, *prev;
617
053837fc 618
1da177e4
LT
619 first = find_vma(mm, start);
620 if (!first)
621 return ERR_PTR(-EFAULT);
622 prev = NULL;
623 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
b24f53a0
LS
624 unsigned long endvma = vma->vm_end;
625
626 if (endvma > end)
627 endvma = end;
628 if (vma->vm_start > start)
629 start = vma->vm_start;
630
dc9aa5b9
CL
631 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
632 if (!vma->vm_next && vma->vm_end < end)
633 return ERR_PTR(-EFAULT);
634 if (prev && prev->vm_end < vma->vm_start)
635 return ERR_PTR(-EFAULT);
636 }
b24f53a0
LS
637
638 if (is_vm_hugetlb_page(vma))
639 goto next;
640
641 if (flags & MPOL_MF_LAZY) {
642 change_prot_numa(vma, start, endvma);
643 goto next;
644 }
645
646 if ((flags & MPOL_MF_STRICT) ||
dc9aa5b9 647 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
b24f53a0 648 vma_migratable(vma))) {
dc9aa5b9 649
dc9aa5b9 650 err = check_pgd_range(vma, start, endvma, nodes,
38e35860 651 flags, private);
1da177e4
LT
652 if (err) {
653 first = ERR_PTR(err);
654 break;
655 }
656 }
b24f53a0 657next:
1da177e4
LT
658 prev = vma;
659 }
660 return first;
661}
662
869833f2
KM
663/*
664 * Apply policy to a single VMA
665 * This must be called with the mmap_sem held for writing.
666 */
667static int vma_replace_policy(struct vm_area_struct *vma,
668 struct mempolicy *pol)
8d34694c 669{
869833f2
KM
670 int err;
671 struct mempolicy *old;
672 struct mempolicy *new;
8d34694c
KM
673
674 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
675 vma->vm_start, vma->vm_end, vma->vm_pgoff,
676 vma->vm_ops, vma->vm_file,
677 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
678
869833f2
KM
679 new = mpol_dup(pol);
680 if (IS_ERR(new))
681 return PTR_ERR(new);
682
683 if (vma->vm_ops && vma->vm_ops->set_policy) {
8d34694c 684 err = vma->vm_ops->set_policy(vma, new);
869833f2
KM
685 if (err)
686 goto err_out;
8d34694c 687 }
869833f2
KM
688
689 old = vma->vm_policy;
690 vma->vm_policy = new; /* protected by mmap_sem */
691 mpol_put(old);
692
693 return 0;
694 err_out:
695 mpol_put(new);
8d34694c
KM
696 return err;
697}
698
1da177e4 699/* Step 2: apply policy to a range and do splits. */
9d8cebd4
KM
700static int mbind_range(struct mm_struct *mm, unsigned long start,
701 unsigned long end, struct mempolicy *new_pol)
1da177e4
LT
702{
703 struct vm_area_struct *next;
9d8cebd4
KM
704 struct vm_area_struct *prev;
705 struct vm_area_struct *vma;
706 int err = 0;
e26a5114 707 pgoff_t pgoff;
9d8cebd4
KM
708 unsigned long vmstart;
709 unsigned long vmend;
1da177e4 710
097d5910 711 vma = find_vma(mm, start);
9d8cebd4
KM
712 if (!vma || vma->vm_start > start)
713 return -EFAULT;
714
097d5910 715 prev = vma->vm_prev;
e26a5114
KM
716 if (start > vma->vm_start)
717 prev = vma;
718
9d8cebd4 719 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
1da177e4 720 next = vma->vm_next;
9d8cebd4
KM
721 vmstart = max(start, vma->vm_start);
722 vmend = min(end, vma->vm_end);
723
e26a5114
KM
724 if (mpol_equal(vma_policy(vma), new_pol))
725 continue;
726
727 pgoff = vma->vm_pgoff +
728 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
9d8cebd4 729 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
e26a5114 730 vma->anon_vma, vma->vm_file, pgoff,
8aacc9f5 731 new_pol);
9d8cebd4
KM
732 if (prev) {
733 vma = prev;
734 next = vma->vm_next;
0bd6f78c
ON
735 if (mpol_equal(vma_policy(vma), new_pol))
736 continue;
737 /* vma_merge() joined vma && vma->next, case 8 */
738 goto replace;
9d8cebd4
KM
739 }
740 if (vma->vm_start != vmstart) {
741 err = split_vma(vma->vm_mm, vma, vmstart, 1);
742 if (err)
743 goto out;
744 }
745 if (vma->vm_end != vmend) {
746 err = split_vma(vma->vm_mm, vma, vmend, 0);
747 if (err)
748 goto out;
749 }
0bd6f78c 750 replace:
869833f2 751 err = vma_replace_policy(vma, new_pol);
8d34694c
KM
752 if (err)
753 goto out;
1da177e4 754 }
9d8cebd4
KM
755
756 out:
1da177e4
LT
757 return err;
758}
759
c61afb18
PJ
760/*
761 * Update task->flags PF_MEMPOLICY bit: set iff non-default
762 * mempolicy. Allows more rapid checking of this (combined perhaps
763 * with other PF_* flag bits) on memory allocation hot code paths.
764 *
765 * If called from outside this file, the task 'p' should -only- be
766 * a newly forked child not yet visible on the task list, because
767 * manipulating the task flags of a visible task is not safe.
768 *
769 * The above limitation is why this routine has the funny name
770 * mpol_fix_fork_child_flag().
771 *
772 * It is also safe to call this with a task pointer of current,
773 * which the static wrapper mpol_set_task_struct_flag() does,
774 * for use within this file.
775 */
776
777void mpol_fix_fork_child_flag(struct task_struct *p)
778{
779 if (p->mempolicy)
780 p->flags |= PF_MEMPOLICY;
781 else
782 p->flags &= ~PF_MEMPOLICY;
783}
784
785static void mpol_set_task_struct_flag(void)
786{
787 mpol_fix_fork_child_flag(current);
788}
789
1da177e4 790/* Set the process memory policy */
028fec41
DR
791static long do_set_mempolicy(unsigned short mode, unsigned short flags,
792 nodemask_t *nodes)
1da177e4 793{
58568d2a 794 struct mempolicy *new, *old;
f4e53d91 795 struct mm_struct *mm = current->mm;
4bfc4495 796 NODEMASK_SCRATCH(scratch);
58568d2a 797 int ret;
1da177e4 798
4bfc4495
KH
799 if (!scratch)
800 return -ENOMEM;
f4e53d91 801
4bfc4495
KH
802 new = mpol_new(mode, flags, nodes);
803 if (IS_ERR(new)) {
804 ret = PTR_ERR(new);
805 goto out;
806 }
f4e53d91
LS
807 /*
808 * prevent changing our mempolicy while show_numa_maps()
809 * is using it.
810 * Note: do_set_mempolicy() can be called at init time
811 * with no 'mm'.
812 */
813 if (mm)
814 down_write(&mm->mmap_sem);
58568d2a 815 task_lock(current);
4bfc4495 816 ret = mpol_set_nodemask(new, nodes, scratch);
58568d2a
MX
817 if (ret) {
818 task_unlock(current);
819 if (mm)
820 up_write(&mm->mmap_sem);
821 mpol_put(new);
4bfc4495 822 goto out;
58568d2a
MX
823 }
824 old = current->mempolicy;
1da177e4 825 current->mempolicy = new;
c61afb18 826 mpol_set_task_struct_flag();
45c4745a 827 if (new && new->mode == MPOL_INTERLEAVE &&
f5b087b5 828 nodes_weight(new->v.nodes))
dfcd3c0d 829 current->il_next = first_node(new->v.nodes);
58568d2a 830 task_unlock(current);
f4e53d91
LS
831 if (mm)
832 up_write(&mm->mmap_sem);
833
58568d2a 834 mpol_put(old);
4bfc4495
KH
835 ret = 0;
836out:
837 NODEMASK_SCRATCH_FREE(scratch);
838 return ret;
1da177e4
LT
839}
840
bea904d5
LS
841/*
842 * Return nodemask for policy for get_mempolicy() query
58568d2a
MX
843 *
844 * Called with task's alloc_lock held
bea904d5
LS
845 */
846static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4 847{
dfcd3c0d 848 nodes_clear(*nodes);
bea904d5
LS
849 if (p == &default_policy)
850 return;
851
45c4745a 852 switch (p->mode) {
19770b32
MG
853 case MPOL_BIND:
854 /* Fall through */
1da177e4 855 case MPOL_INTERLEAVE:
dfcd3c0d 856 *nodes = p->v.nodes;
1da177e4
LT
857 break;
858 case MPOL_PREFERRED:
fc36b8d3 859 if (!(p->flags & MPOL_F_LOCAL))
dfcd3c0d 860 node_set(p->v.preferred_node, *nodes);
53f2556b 861 /* else return empty node mask for local allocation */
1da177e4
LT
862 break;
863 default:
864 BUG();
865 }
866}
867
868static int lookup_node(struct mm_struct *mm, unsigned long addr)
869{
870 struct page *p;
871 int err;
872
873 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
874 if (err >= 0) {
875 err = page_to_nid(p);
876 put_page(p);
877 }
878 return err;
879}
880
1da177e4 881/* Retrieve NUMA policy */
dbcb0f19
AB
882static long do_get_mempolicy(int *policy, nodemask_t *nmask,
883 unsigned long addr, unsigned long flags)
1da177e4 884{
8bccd85f 885 int err;
1da177e4
LT
886 struct mm_struct *mm = current->mm;
887 struct vm_area_struct *vma = NULL;
888 struct mempolicy *pol = current->mempolicy;
889
754af6f5
LS
890 if (flags &
891 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 892 return -EINVAL;
754af6f5
LS
893
894 if (flags & MPOL_F_MEMS_ALLOWED) {
895 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
896 return -EINVAL;
897 *policy = 0; /* just so it's initialized */
58568d2a 898 task_lock(current);
754af6f5 899 *nmask = cpuset_current_mems_allowed;
58568d2a 900 task_unlock(current);
754af6f5
LS
901 return 0;
902 }
903
1da177e4 904 if (flags & MPOL_F_ADDR) {
bea904d5
LS
905 /*
906 * Do NOT fall back to task policy if the
907 * vma/shared policy at addr is NULL. We
908 * want to return MPOL_DEFAULT in this case.
909 */
1da177e4
LT
910 down_read(&mm->mmap_sem);
911 vma = find_vma_intersection(mm, addr, addr+1);
912 if (!vma) {
913 up_read(&mm->mmap_sem);
914 return -EFAULT;
915 }
916 if (vma->vm_ops && vma->vm_ops->get_policy)
917 pol = vma->vm_ops->get_policy(vma, addr);
918 else
919 pol = vma->vm_policy;
920 } else if (addr)
921 return -EINVAL;
922
923 if (!pol)
bea904d5 924 pol = &default_policy; /* indicates default behavior */
1da177e4
LT
925
926 if (flags & MPOL_F_NODE) {
927 if (flags & MPOL_F_ADDR) {
928 err = lookup_node(mm, addr);
929 if (err < 0)
930 goto out;
8bccd85f 931 *policy = err;
1da177e4 932 } else if (pol == current->mempolicy &&
45c4745a 933 pol->mode == MPOL_INTERLEAVE) {
8bccd85f 934 *policy = current->il_next;
1da177e4
LT
935 } else {
936 err = -EINVAL;
937 goto out;
938 }
bea904d5
LS
939 } else {
940 *policy = pol == &default_policy ? MPOL_DEFAULT :
941 pol->mode;
d79df630
DR
942 /*
943 * Internal mempolicy flags must be masked off before exposing
944 * the policy to userspace.
945 */
946 *policy |= (pol->flags & MPOL_MODE_FLAGS);
bea904d5 947 }
1da177e4
LT
948
949 if (vma) {
950 up_read(&current->mm->mmap_sem);
951 vma = NULL;
952 }
953
1da177e4 954 err = 0;
58568d2a 955 if (nmask) {
c6b6ef8b
LS
956 if (mpol_store_user_nodemask(pol)) {
957 *nmask = pol->w.user_nodemask;
958 } else {
959 task_lock(current);
960 get_policy_nodemask(pol, nmask);
961 task_unlock(current);
962 }
58568d2a 963 }
1da177e4
LT
964
965 out:
52cd3b07 966 mpol_cond_put(pol);
1da177e4
LT
967 if (vma)
968 up_read(&current->mm->mmap_sem);
969 return err;
970}
971
b20a3503 972#ifdef CONFIG_MIGRATION
6ce3c4c0
CL
973/*
974 * page migration
975 */
fc301289
CL
976static void migrate_page_add(struct page *page, struct list_head *pagelist,
977 unsigned long flags)
6ce3c4c0
CL
978{
979 /*
fc301289 980 * Avoid migrating a page that is shared with others.
6ce3c4c0 981 */
62695a84
NP
982 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
983 if (!isolate_lru_page(page)) {
984 list_add_tail(&page->lru, pagelist);
6d9c285a
KM
985 inc_zone_page_state(page, NR_ISOLATED_ANON +
986 page_is_file_cache(page));
62695a84
NP
987 }
988 }
7e2ab150 989}
6ce3c4c0 990
742755a1 991static struct page *new_node_page(struct page *page, unsigned long node, int **x)
95a402c3 992{
6484eb3e 993 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
95a402c3
CL
994}
995
7e2ab150
CL
996/*
997 * Migrate pages from one node to a target node.
998 * Returns error or the number of pages not migrated.
999 */
dbcb0f19
AB
1000static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1001 int flags)
7e2ab150
CL
1002{
1003 nodemask_t nmask;
1004 LIST_HEAD(pagelist);
1005 int err = 0;
1006
1007 nodes_clear(nmask);
1008 node_set(source, nmask);
6ce3c4c0 1009
08270807
MK
1010 /*
1011 * This does not "check" the range but isolates all pages that
1012 * need migration. Between passing in the full user address
1013 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1014 */
1015 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1016 check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
7e2ab150
CL
1017 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1018
cf608ac1 1019 if (!list_empty(&pagelist)) {
7f0f2496 1020 err = migrate_pages(&pagelist, new_node_page, dest,
9c620e2b 1021 MIGRATE_SYNC, MR_SYSCALL);
cf608ac1
MK
1022 if (err)
1023 putback_lru_pages(&pagelist);
1024 }
95a402c3 1025
7e2ab150 1026 return err;
6ce3c4c0
CL
1027}
1028
39743889 1029/*
7e2ab150
CL
1030 * Move pages between the two nodesets so as to preserve the physical
1031 * layout as much as possible.
39743889
CL
1032 *
1033 * Returns the number of page that could not be moved.
1034 */
0ce72d4f
AM
1035int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1036 const nodemask_t *to, int flags)
39743889 1037{
7e2ab150 1038 int busy = 0;
0aedadf9 1039 int err;
7e2ab150 1040 nodemask_t tmp;
39743889 1041
0aedadf9
CL
1042 err = migrate_prep();
1043 if (err)
1044 return err;
1045
53f2556b 1046 down_read(&mm->mmap_sem);
39743889 1047
0ce72d4f 1048 err = migrate_vmas(mm, from, to, flags);
7b2259b3
CL
1049 if (err)
1050 goto out;
1051
da0aa138
KM
1052 /*
1053 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1054 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1055 * bit in 'tmp', and return that <source, dest> pair for migration.
1056 * The pair of nodemasks 'to' and 'from' define the map.
1057 *
1058 * If no pair of bits is found that way, fallback to picking some
1059 * pair of 'source' and 'dest' bits that are not the same. If the
1060 * 'source' and 'dest' bits are the same, this represents a node
1061 * that will be migrating to itself, so no pages need move.
1062 *
1063 * If no bits are left in 'tmp', or if all remaining bits left
1064 * in 'tmp' correspond to the same bit in 'to', return false
1065 * (nothing left to migrate).
1066 *
1067 * This lets us pick a pair of nodes to migrate between, such that
1068 * if possible the dest node is not already occupied by some other
1069 * source node, minimizing the risk of overloading the memory on a
1070 * node that would happen if we migrated incoming memory to a node
1071 * before migrating outgoing memory source that same node.
1072 *
1073 * A single scan of tmp is sufficient. As we go, we remember the
1074 * most recent <s, d> pair that moved (s != d). If we find a pair
1075 * that not only moved, but what's better, moved to an empty slot
1076 * (d is not set in tmp), then we break out then, with that pair.
ae0e47f0 1077 * Otherwise when we finish scanning from_tmp, we at least have the
da0aa138
KM
1078 * most recent <s, d> pair that moved. If we get all the way through
1079 * the scan of tmp without finding any node that moved, much less
1080 * moved to an empty node, then there is nothing left worth migrating.
1081 */
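	/*
	 * Example (illustrative): from = {0,1}, to = {1,2}.  The first
	 * scan settles on <1,2> because destination node 2 is an empty
	 * slot (not in tmp), so node 1 is emptied into node 2 first;
	 * the second scan then moves node 0 into the now-vacated node 1,
	 * preserving the relative layout without stacking two source
	 * nodes onto one destination.
	 */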
d4984711 1082
0ce72d4f 1083 tmp = *from;
7e2ab150
CL
1084 while (!nodes_empty(tmp)) {
1085 int s,d;
1086 int source = -1;
1087 int dest = 0;
1088
1089 for_each_node_mask(s, tmp) {
4a5b18cc
LW
1090
1091 /*
1092 * do_migrate_pages() tries to maintain the relative
1093 * node relationship of the pages established between
1094 * threads and memory areas.
1095 *
1096 * However if the number of source nodes is not equal to
1097 * the number of destination nodes we can not preserve
1098 * this node relative relationship. In that case, skip
1099 * copying memory from a node that is in the destination
1100 * mask.
1101 *
1102 * Example: [2,3,4] -> [3,4,5] moves everything.
1103 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1104 */
1105
0ce72d4f
AM
1106 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1107 (node_isset(s, *to)))
4a5b18cc
LW
1108 continue;
1109
0ce72d4f 1110 d = node_remap(s, *from, *to);
7e2ab150
CL
1111 if (s == d)
1112 continue;
1113
1114 source = s; /* Node moved. Memorize */
1115 dest = d;
1116
1117 /* dest not in remaining from nodes? */
1118 if (!node_isset(dest, tmp))
1119 break;
1120 }
1121 if (source == -1)
1122 break;
1123
1124 node_clear(source, tmp);
1125 err = migrate_to_node(mm, source, dest, flags);
1126 if (err > 0)
1127 busy += err;
1128 if (err < 0)
1129 break;
39743889 1130 }
7b2259b3 1131out:
39743889 1132 up_read(&mm->mmap_sem);
7e2ab150
CL
1133 if (err < 0)
1134 return err;
1135 return busy;
b20a3503
CL
1136
1137}
1138
3ad33b24
LS
1139/*
1140 * Allocate a new page for page migration based on vma policy.
1141 * Start assuming that page is mapped by vma pointed to by @private.
1142 * Search forward from there, if not. N.B., this assumes that the
1143 * list of pages handed to migrate_pages()--which is how we get here--
1144 * is in virtual address order.
1145 */
742755a1 1146static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
1147{
1148 struct vm_area_struct *vma = (struct vm_area_struct *)private;
3ad33b24 1149 unsigned long uninitialized_var(address);
95a402c3 1150
3ad33b24
LS
1151 while (vma) {
1152 address = page_address_in_vma(page, vma);
1153 if (address != -EFAULT)
1154 break;
1155 vma = vma->vm_next;
1156 }
1157
1158 /*
1159 * if !vma, alloc_page_vma() will use task or system default policy
1160 */
1161 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
95a402c3 1162}
b20a3503
CL
1163#else
1164
1165static void migrate_page_add(struct page *page, struct list_head *pagelist,
1166 unsigned long flags)
1167{
39743889
CL
1168}
1169
0ce72d4f
AM
1170int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1171 const nodemask_t *to, int flags)
b20a3503
CL
1172{
1173 return -ENOSYS;
1174}
95a402c3 1175
69939749 1176static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
1177{
1178 return NULL;
1179}
b20a3503
CL
1180#endif
1181
dbcb0f19 1182static long do_mbind(unsigned long start, unsigned long len,
028fec41
DR
1183 unsigned short mode, unsigned short mode_flags,
1184 nodemask_t *nmask, unsigned long flags)
6ce3c4c0
CL
1185{
1186 struct vm_area_struct *vma;
1187 struct mm_struct *mm = current->mm;
1188 struct mempolicy *new;
1189 unsigned long end;
1190 int err;
1191 LIST_HEAD(pagelist);
1192
b24f53a0 1193 if (flags & ~(unsigned long)MPOL_MF_VALID)
6ce3c4c0 1194 return -EINVAL;
74c00241 1195 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
1196 return -EPERM;
1197
1198 if (start & ~PAGE_MASK)
1199 return -EINVAL;
1200
1201 if (mode == MPOL_DEFAULT)
1202 flags &= ~MPOL_MF_STRICT;
1203
1204 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1205 end = start + len;
1206
1207 if (end < start)
1208 return -EINVAL;
1209 if (end == start)
1210 return 0;
1211
028fec41 1212 new = mpol_new(mode, mode_flags, nmask);
6ce3c4c0
CL
1213 if (IS_ERR(new))
1214 return PTR_ERR(new);
1215
b24f53a0
LS
1216 if (flags & MPOL_MF_LAZY)
1217 new->flags |= MPOL_F_MOF;
1218
6ce3c4c0
CL
1219 /*
1220 * If we are using the default policy then operation
1221 * on discontinuous address spaces is okay after all
1222 */
1223 if (!new)
1224 flags |= MPOL_MF_DISCONTIG_OK;
1225
028fec41
DR
1226 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1227 start, start + len, mode, mode_flags,
00ef2d2f 1228 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
6ce3c4c0 1229
0aedadf9
CL
1230 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1231
1232 err = migrate_prep();
1233 if (err)
b05ca738 1234 goto mpol_out;
0aedadf9 1235 }
4bfc4495
KH
1236 {
1237 NODEMASK_SCRATCH(scratch);
1238 if (scratch) {
1239 down_write(&mm->mmap_sem);
1240 task_lock(current);
1241 err = mpol_set_nodemask(new, nmask, scratch);
1242 task_unlock(current);
1243 if (err)
1244 up_write(&mm->mmap_sem);
1245 } else
1246 err = -ENOMEM;
1247 NODEMASK_SCRATCH_FREE(scratch);
1248 }
b05ca738
KM
1249 if (err)
1250 goto mpol_out;
1251
6ce3c4c0
CL
1252 vma = check_range(mm, start, end, nmask,
1253 flags | MPOL_MF_INVERT, &pagelist);
1254
b24f53a0 1255 err = PTR_ERR(vma); /* maybe ... */
a720094d 1256 if (!IS_ERR(vma))
9d8cebd4 1257 err = mbind_range(mm, start, end, new);
7e2ab150 1258
b24f53a0
LS
1259 if (!err) {
1260 int nr_failed = 0;
1261
cf608ac1 1262 if (!list_empty(&pagelist)) {
b24f53a0 1263 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
95a402c3 1264 nr_failed = migrate_pages(&pagelist, new_vma_page,
9c620e2b
HD
1265 (unsigned long)vma,
1266 MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
cf608ac1
MK
1267 if (nr_failed)
1268 putback_lru_pages(&pagelist);
1269 }
6ce3c4c0 1270
b24f53a0 1271 if (nr_failed && (flags & MPOL_MF_STRICT))
6ce3c4c0 1272 err = -EIO;
ab8a3e14
KM
1273 } else
1274 putback_lru_pages(&pagelist);
b20a3503 1275
6ce3c4c0 1276 up_write(&mm->mmap_sem);
b05ca738 1277 mpol_out:
f0be3d32 1278 mpol_put(new);
6ce3c4c0
CL
1279 return err;
1280}
1281
8bccd85f
CL
1282/*
1283 * User space interface with variable sized bitmaps for nodelists.
1284 */
1285
1286/* Copy a node mask from user space. */
39743889 1287static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
1288 unsigned long maxnode)
1289{
1290 unsigned long k;
1291 unsigned long nlongs;
1292 unsigned long endmask;
1293
1294 --maxnode;
1295 nodes_clear(*nodes);
1296 if (maxnode == 0 || !nmask)
1297 return 0;
a9c930ba 1298 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 1299 return -EINVAL;
8bccd85f
CL
1300
1301 nlongs = BITS_TO_LONGS(maxnode);
1302 if ((maxnode % BITS_PER_LONG) == 0)
1303 endmask = ~0UL;
1304 else
1305 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1306
1307 /* When the user specified more nodes than supported just check
1308 if the non supported part is all zero. */
1309 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1310 if (nlongs > PAGE_SIZE/sizeof(long))
1311 return -EINVAL;
1312 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1313 unsigned long t;
1314 if (get_user(t, nmask + k))
1315 return -EFAULT;
1316 if (k == nlongs - 1) {
1317 if (t & endmask)
1318 return -EINVAL;
1319 } else if (t)
1320 return -EINVAL;
1321 }
1322 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1323 endmask = ~0UL;
1324 }
1325
1326 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1327 return -EFAULT;
1328 nodes_addr(*nodes)[nlongs-1] &= endmask;
1329 return 0;
1330}
1331
1332/* Copy a kernel node mask to user space */
1333static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1334 nodemask_t *nodes)
1335{
1336 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1337 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1338
1339 if (copy > nbytes) {
1340 if (copy > PAGE_SIZE)
1341 return -EINVAL;
1342 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1343 return -EFAULT;
1344 copy = nbytes;
1345 }
1346 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1347}
1348
938bb9f5
HC
1349SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1350 unsigned long, mode, unsigned long __user *, nmask,
1351 unsigned long, maxnode, unsigned, flags)
8bccd85f
CL
1352{
1353 nodemask_t nodes;
1354 int err;
028fec41 1355 unsigned short mode_flags;
8bccd85f 1356
028fec41
DR
1357 mode_flags = mode & MPOL_MODE_FLAGS;
1358 mode &= ~MPOL_MODE_FLAGS;
a3b51e01
DR
1359 if (mode >= MPOL_MAX)
1360 return -EINVAL;
4c50bc01
DR
1361 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1362 (mode_flags & MPOL_F_RELATIVE_NODES))
1363 return -EINVAL;
8bccd85f
CL
1364 err = get_nodes(&nodes, nmask, maxnode);
1365 if (err)
1366 return err;
028fec41 1367 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
8bccd85f
CL
1368}
1369
1370/* Set the process memory policy */
938bb9f5
HC
1371SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1372 unsigned long, maxnode)
8bccd85f
CL
1373{
1374 int err;
1375 nodemask_t nodes;
028fec41 1376 unsigned short flags;
8bccd85f 1377
028fec41
DR
1378 flags = mode & MPOL_MODE_FLAGS;
1379 mode &= ~MPOL_MODE_FLAGS;
1380 if ((unsigned int)mode >= MPOL_MAX)
8bccd85f 1381 return -EINVAL;
4c50bc01
DR
1382 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1383 return -EINVAL;
8bccd85f
CL
1384 err = get_nodes(&nodes, nmask, maxnode);
1385 if (err)
1386 return err;
028fec41 1387 return do_set_mempolicy(mode, flags, &nodes);
8bccd85f
CL
1388}
1389
938bb9f5
HC
1390SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1391 const unsigned long __user *, old_nodes,
1392 const unsigned long __user *, new_nodes)
39743889 1393{
c69e8d9c 1394 const struct cred *cred = current_cred(), *tcred;
596d7cfa 1395 struct mm_struct *mm = NULL;
39743889 1396 struct task_struct *task;
39743889
CL
1397 nodemask_t task_nodes;
1398 int err;
596d7cfa
KM
1399 nodemask_t *old;
1400 nodemask_t *new;
1401 NODEMASK_SCRATCH(scratch);
1402
1403 if (!scratch)
1404 return -ENOMEM;
39743889 1405
596d7cfa
KM
1406 old = &scratch->mask1;
1407 new = &scratch->mask2;
1408
1409 err = get_nodes(old, old_nodes, maxnode);
39743889 1410 if (err)
596d7cfa 1411 goto out;
39743889 1412
596d7cfa 1413 err = get_nodes(new, new_nodes, maxnode);
39743889 1414 if (err)
596d7cfa 1415 goto out;
39743889
CL
1416
1417 /* Find the mm_struct */
55cfaa3c 1418 rcu_read_lock();
228ebcbe 1419 task = pid ? find_task_by_vpid(pid) : current;
39743889 1420 if (!task) {
55cfaa3c 1421 rcu_read_unlock();
596d7cfa
KM
1422 err = -ESRCH;
1423 goto out;
39743889 1424 }
3268c63e 1425 get_task_struct(task);
39743889 1426
596d7cfa 1427 err = -EINVAL;
39743889
CL
1428
1429 /*
1430 * Check if this process has the right to modify the specified
1431 * process. The right exists if the process has administrative
7f927fcc 1432 * capabilities, superuser privileges or the same
39743889
CL
1433 * userid as the target process.
1434 */
c69e8d9c 1435 tcred = __task_cred(task);
b38a86eb
EB
1436 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1437 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
74c00241 1438 !capable(CAP_SYS_NICE)) {
c69e8d9c 1439 rcu_read_unlock();
39743889 1440 err = -EPERM;
3268c63e 1441 goto out_put;
39743889 1442 }
c69e8d9c 1443 rcu_read_unlock();
39743889
CL
1444
1445 task_nodes = cpuset_mems_allowed(task);
1446 /* Is the user allowed to access the target nodes? */
596d7cfa 1447 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889 1448 err = -EPERM;
3268c63e 1449 goto out_put;
39743889
CL
1450 }
1451
01f13bd6 1452 if (!nodes_subset(*new, node_states[N_MEMORY])) {
3b42d28b 1453 err = -EINVAL;
3268c63e 1454 goto out_put;
3b42d28b
CL
1455 }
1456
86c3a764
DQ
1457 err = security_task_movememory(task);
1458 if (err)
3268c63e 1459 goto out_put;
86c3a764 1460
3268c63e
CL
1461 mm = get_task_mm(task);
1462 put_task_struct(task);
f2a9ef88
SL
1463
1464 if (!mm) {
3268c63e 1465 err = -EINVAL;
f2a9ef88
SL
1466 goto out;
1467 }
1468
1469 err = do_migrate_pages(mm, old, new,
1470 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
3268c63e
CL
1471
1472 mmput(mm);
1473out:
596d7cfa
KM
1474 NODEMASK_SCRATCH_FREE(scratch);
1475
39743889 1476 return err;
3268c63e
CL
1477
1478out_put:
1479 put_task_struct(task);
1480 goto out;
1481
39743889
CL
1482}
1483
1484
8bccd85f 1485/* Retrieve NUMA policy */
938bb9f5
HC
1486SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1487 unsigned long __user *, nmask, unsigned long, maxnode,
1488 unsigned long, addr, unsigned long, flags)
8bccd85f 1489{
dbcb0f19
AB
1490 int err;
1491 int uninitialized_var(pval);
8bccd85f
CL
1492 nodemask_t nodes;
1493
1494 if (nmask != NULL && maxnode < MAX_NUMNODES)
1495 return -EINVAL;
1496
1497 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1498
1499 if (err)
1500 return err;
1501
1502 if (policy && put_user(pval, policy))
1503 return -EFAULT;
1504
1505 if (nmask)
1506 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1507
1508 return err;
1509}
1510
1da177e4
LT
1511#ifdef CONFIG_COMPAT
1512
1513asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1514 compat_ulong_t __user *nmask,
1515 compat_ulong_t maxnode,
1516 compat_ulong_t addr, compat_ulong_t flags)
1517{
1518 long err;
1519 unsigned long __user *nm = NULL;
1520 unsigned long nr_bits, alloc_size;
1521 DECLARE_BITMAP(bm, MAX_NUMNODES);
1522
1523 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1524 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1525
1526 if (nmask)
1527 nm = compat_alloc_user_space(alloc_size);
1528
1529 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1530
1531 if (!err && nmask) {
2bbff6c7
KH
1532 unsigned long copy_size;
1533 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1534 err = copy_from_user(bm, nm, copy_size);
1da177e4
LT
1535 /* ensure entire bitmap is zeroed */
1536 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1537 err |= compat_put_bitmap(nmask, bm, nr_bits);
1538 }
1539
1540 return err;
1541}
1542
1543asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1544 compat_ulong_t maxnode)
1545{
1546 long err = 0;
1547 unsigned long __user *nm = NULL;
1548 unsigned long nr_bits, alloc_size;
1549 DECLARE_BITMAP(bm, MAX_NUMNODES);
1550
1551 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1552 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1553
1554 if (nmask) {
1555 err = compat_get_bitmap(bm, nmask, nr_bits);
1556 nm = compat_alloc_user_space(alloc_size);
1557 err |= copy_to_user(nm, bm, alloc_size);
1558 }
1559
1560 if (err)
1561 return -EFAULT;
1562
1563 return sys_set_mempolicy(mode, nm, nr_bits+1);
1564}
1565
1566asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1567 compat_ulong_t mode, compat_ulong_t __user *nmask,
1568 compat_ulong_t maxnode, compat_ulong_t flags)
1569{
1570 long err = 0;
1571 unsigned long __user *nm = NULL;
1572 unsigned long nr_bits, alloc_size;
dfcd3c0d 1573 nodemask_t bm;
1da177e4
LT
1574
1575 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1576 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1577
1578 if (nmask) {
dfcd3c0d 1579 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1da177e4 1580 nm = compat_alloc_user_space(alloc_size);
dfcd3c0d 1581 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1da177e4
LT
1582 }
1583
1584 if (err)
1585 return -EFAULT;
1586
1587 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1588}
1589
1590#endif
1591
480eccf9
LS
1592/*
1593 * get_vma_policy(@task, @vma, @addr)
1594 * @task - task for fallback if vma policy == default
1595 * @vma - virtual memory area whose policy is sought
1596 * @addr - address in @vma for shared policy lookup
1597 *
1598 * Returns effective policy for a VMA at specified address.
1599 * Falls back to @task or system default policy, as necessary.
32f8516a
DR
1600 * Current or other task's task mempolicy and non-shared vma policies must be
1601 * protected by task_lock(task) by the caller.
52cd3b07
LS
1602 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1603 * count--added by the get_policy() vm_op, as appropriate--to protect against
1604 * freeing by another task. It is the caller's responsibility to free the
1605 * extra reference for shared policies.
480eccf9 1606 */
d98f6cb6 1607struct mempolicy *get_vma_policy(struct task_struct *task,
48fce342 1608 struct vm_area_struct *vma, unsigned long addr)
1da177e4 1609{
5606e387 1610 struct mempolicy *pol = get_task_policy(task);
1da177e4
LT
1611
1612 if (vma) {
480eccf9 1613 if (vma->vm_ops && vma->vm_ops->get_policy) {
ae4d8c16
LS
1614 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1615 addr);
1616 if (vpol)
1617 pol = vpol;
00442ad0 1618 } else if (vma->vm_policy) {
1da177e4 1619 pol = vma->vm_policy;
00442ad0
MG
1620
1621 /*
1622 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1623 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1624 * count on these policies which will be dropped by
1625 * mpol_cond_put() later
1626 */
1627 if (mpol_needs_cond_ref(pol))
1628 mpol_get(pol);
1629 }
1da177e4
LT
1630 }
1631 if (!pol)
1632 pol = &default_policy;
1633 return pol;
1634}
1635
d3eb1570
LJ
1636static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1637{
1638 enum zone_type dynamic_policy_zone = policy_zone;
1639
1640 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1641
1642 /*
1643 * if policy->v.nodes has movable memory only,
1644 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1645 *
1646 * policy->v.nodes is intersect with node_states[N_MEMORY].
1647 * so if the following test faile, it implies
1648 * policy->v.nodes has movable memory only.
1649 */
1650 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1651 dynamic_policy_zone = ZONE_MOVABLE;
1652
1653 return zone >= dynamic_policy_zone;
1654}
1655
52cd3b07
LS
1656/*
1657 * Return a nodemask representing a mempolicy for filtering nodes for
1658 * page allocation
1659 */
1660static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32
MG
1661{
1662 /* Lower zones don't get a nodemask applied for MPOL_BIND */
45c4745a 1663 if (unlikely(policy->mode == MPOL_BIND) &&
d3eb1570 1664 apply_policy_zone(policy, gfp_zone(gfp)) &&
19770b32
MG
1665 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1666 return &policy->v.nodes;
1667
1668 return NULL;
1669}
1670
52cd3b07 1671/* Return a zonelist indicated by gfp for node representing a mempolicy */
2f5f9486
AK
1672static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1673 int nd)
1da177e4 1674{
45c4745a 1675 switch (policy->mode) {
1da177e4 1676 case MPOL_PREFERRED:
fc36b8d3
LS
1677 if (!(policy->flags & MPOL_F_LOCAL))
1678 nd = policy->v.preferred_node;
1da177e4
LT
1679 break;
1680 case MPOL_BIND:
19770b32 1681 /*
52cd3b07
LS
1682 * Normally, MPOL_BIND allocations are node-local within the
1683 * allowed nodemask. However, if __GFP_THISNODE is set and the
6eb27e1f 1684 * current node isn't part of the mask, we use the zonelist for
52cd3b07 1685 * the first node in the mask instead.
19770b32 1686 */
19770b32
MG
1687 if (unlikely(gfp & __GFP_THISNODE) &&
1688 unlikely(!node_isset(nd, policy->v.nodes)))
1689 nd = first_node(policy->v.nodes);
1690 break;
1da177e4 1691 default:
1da177e4
LT
1692 BUG();
1693 }
0e88460d 1694 return node_zonelist(nd, gfp);
1da177e4
LT
1695}
1696
1697/* Do dynamic interleaving for a process */
1698static unsigned interleave_nodes(struct mempolicy *policy)
1699{
1700 unsigned nid, next;
1701 struct task_struct *me = current;
1702
1703 nid = me->il_next;
dfcd3c0d 1704 next = next_node(nid, policy->v.nodes);
1da177e4 1705 if (next >= MAX_NUMNODES)
dfcd3c0d 1706 next = first_node(policy->v.nodes);
f5b087b5
DR
1707 if (next < MAX_NUMNODES)
1708 me->il_next = next;
1da177e4
LT
1709 return nid;
1710}
1711
dc85da15
CL
1712/*
1713 * Depending on the memory policy provide a node from which to allocate the
1714 * next slab entry.
52cd3b07
LS
1715 * @policy must be protected by freeing by the caller. If @policy is
1716 * the current task's mempolicy, this protection is implicit, as only the
1717 * task can change it's policy. The system default policy requires no
1718 * such protection.
dc85da15 1719 */
e7b691b0 1720unsigned slab_node(void)
dc85da15 1721{
e7b691b0
AK
1722 struct mempolicy *policy;
1723
1724 if (in_interrupt())
1725 return numa_node_id();
1726
1727 policy = current->mempolicy;
fc36b8d3 1728 if (!policy || policy->flags & MPOL_F_LOCAL)
bea904d5
LS
1729 return numa_node_id();
1730
1731 switch (policy->mode) {
1732 case MPOL_PREFERRED:
fc36b8d3
LS
1733 /*
1734 * handled MPOL_F_LOCAL above
1735 */
1736 return policy->v.preferred_node;
765c4507 1737
dc85da15
CL
1738 case MPOL_INTERLEAVE:
1739 return interleave_nodes(policy);
1740
dd1a239f 1741 case MPOL_BIND: {
dc85da15
CL
1742 /*
1743 * Follow bind policy behavior and start allocation at the
1744 * first node.
1745 */
19770b32
MG
1746 struct zonelist *zonelist;
1747 struct zone *zone;
1748 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1749 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1750 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1751 &policy->v.nodes,
1752 &zone);
800416f7 1753 return zone ? zone->node : numa_node_id();
dd1a239f 1754 }
dc85da15 1755
dc85da15 1756 default:
bea904d5 1757 BUG();
dc85da15
CL
1758 }
1759}
1760
1da177e4
LT
1761/* Do static interleaving for a VMA with known offset. */
1762static unsigned offset_il_node(struct mempolicy *pol,
1763 struct vm_area_struct *vma, unsigned long off)
1764{
dfcd3c0d 1765 unsigned nnodes = nodes_weight(pol->v.nodes);
f5b087b5 1766 unsigned target;
1da177e4
LT
1767 int c;
1768 int nid = -1;
1769
f5b087b5
DR
1770 if (!nnodes)
1771 return numa_node_id();
1772 target = (unsigned int)off % nnodes;
1da177e4
LT
1773 c = 0;
1774 do {
dfcd3c0d 1775 nid = next_node(nid, pol->v.nodes);
1da177e4
LT
1776 c++;
1777 } while (c <= target);
1da177e4
LT
1778 return nid;
1779}
1780
5da7ca86
CL
1781/* Determine a node number for interleave */
1782static inline unsigned interleave_nid(struct mempolicy *pol,
1783 struct vm_area_struct *vma, unsigned long addr, int shift)
1784{
1785 if (vma) {
1786 unsigned long off;
1787
3b98b087
NA
1788 /*
1789 * for small pages, there is no difference between
1790 * shift and PAGE_SHIFT, so the bit-shift is safe.
1791 * for huge pages, since vm_pgoff is in units of small
1792 * pages, we need to shift off the always 0 bits to get
1793 * a useful offset.
1794 */
1795 BUG_ON(shift < PAGE_SHIFT);
1796 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86
CL
1797 off += (addr - vma->vm_start) >> shift;
1798 return offset_il_node(pol, vma, off);
1799 } else
1800 return interleave_nodes(pol);
1801}
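To make the shift arithmetic concrete, a worked example with illustrative numbers: for 2 MB huge pages on x86-64, shift is 21 and PAGE_SHIFT is 12, so a vm_pgoff of 0x600 small pages becomes 0x600 >> 9 = 3 huge-page units; a fault 4 MB past vm_start adds (4 MB) >> 21 = 2, giving off = 5. offset_il_node() then returns the (5 % nnodes)-th set node, so the chosen node depends only on the offset into the backing object, not on where the VMA happens to be mapped.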
1802
778d3b0f
MH
1803/*
1804 * Return the bit number of a random bit set in the nodemask.
1805 * (returns -1 if nodemask is empty)
1806 */
1807int node_random(const nodemask_t *maskp)
1808{
1809 int w, bit = -1;
1810
1811 w = nodes_weight(*maskp);
1812 if (w)
1813 bit = bitmap_ord_to_pos(maskp->bits,
1814 get_random_int() % w, MAX_NUMNODES);
1815 return bit;
1816}
1817
00ac59ad 1818#ifdef CONFIG_HUGETLBFS
480eccf9
LS
1819/*
 1820 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol, @nodemask)
1821 * @vma = virtual memory area whose policy is sought
1822 * @addr = address in @vma for shared policy lookup and interleave policy
1823 * @gfp_flags = for requested zone
19770b32
MG
1824 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1825 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 1826 *
52cd3b07
LS
1827 * Returns a zonelist suitable for a huge page allocation and a pointer
1828 * to the struct mempolicy for conditional unref after allocation.
 1829 * If the effective policy is MPOL_BIND, returns a pointer to the mempolicy's
1830 * @nodemask for filtering the zonelist.
c0ff7453
MX
1831 *
1832 * Must be protected by get_mems_allowed()
480eccf9 1833 */
396faf03 1834struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
19770b32
MG
1835 gfp_t gfp_flags, struct mempolicy **mpol,
1836 nodemask_t **nodemask)
5da7ca86 1837{
480eccf9 1838 struct zonelist *zl;
5da7ca86 1839
52cd3b07 1840 *mpol = get_vma_policy(current, vma, addr);
19770b32 1841 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 1842
52cd3b07
LS
1843 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1844 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
a5516438 1845 huge_page_shift(hstate_vma(vma))), gfp_flags);
52cd3b07 1846 } else {
2f5f9486 1847 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
52cd3b07
LS
1848 if ((*mpol)->mode == MPOL_BIND)
1849 *nodemask = &(*mpol)->v.nodes;
480eccf9
LS
1850 }
1851 return zl;
5da7ca86 1852}
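A sketch of the expected calling convention, loosely modelled on the hugetlb fault path (the gfp mask and loop body here are illustrative, not taken from this file; vma and addr are assumed to come from the fault): obtain the zonelist and optional nodemask, walk it, then drop the conditional policy reference:

struct mempolicy *mpol;
nodemask_t *nodemask;
struct zonelist *zonelist;
struct zoneref *z;
struct zone *zone;

zonelist = huge_zonelist(vma, addr, GFP_HIGHUSER_MOVABLE,
                         &mpol, &nodemask);
for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                gfp_zone(GFP_HIGHUSER_MOVABLE), nodemask) {
        /* try to take a reserved huge page from zone_to_nid(zone) */
}
mpol_cond_put(mpol);    /* unref only if the policy was ref-counted (shared) */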
06808b08
LS
1853
1854/*
1855 * init_nodemask_of_mempolicy
1856 *
1857 * If the current task's mempolicy is "default" [NULL], return 'false'
1858 * to indicate default policy. Otherwise, extract the policy nodemask
1859 * for 'bind' or 'interleave' policy into the argument nodemask, or
1860 * initialize the argument nodemask to contain the single node for
1861 * 'preferred' or 'local' policy and return 'true' to indicate presence
1862 * of non-default mempolicy.
1863 *
1864 * We don't bother with reference counting the mempolicy [mpol_get/put]
 1865 * because the current task is examining its own mempolicy and a task's
1866 * mempolicy is only ever changed by the task itself.
1867 *
1868 * N.B., it is the caller's responsibility to free a returned nodemask.
1869 */
1870bool init_nodemask_of_mempolicy(nodemask_t *mask)
1871{
1872 struct mempolicy *mempolicy;
1873 int nid;
1874
1875 if (!(mask && current->mempolicy))
1876 return false;
1877
c0ff7453 1878 task_lock(current);
06808b08
LS
1879 mempolicy = current->mempolicy;
1880 switch (mempolicy->mode) {
1881 case MPOL_PREFERRED:
1882 if (mempolicy->flags & MPOL_F_LOCAL)
1883 nid = numa_node_id();
1884 else
1885 nid = mempolicy->v.preferred_node;
1886 init_nodemask_of_node(mask, nid);
1887 break;
1888
1889 case MPOL_BIND:
1890 /* Fall through */
1891 case MPOL_INTERLEAVE:
1892 *mask = mempolicy->v.nodes;
1893 break;
1894
1895 default:
1896 BUG();
1897 }
c0ff7453 1898 task_unlock(current);
06808b08
LS
1899
1900 return true;
1901}
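A rough sketch of the intended use, modelled on the hugetlb sysctl path (the surrounding logic is illustrative, not from this file): allocate a scratch nodemask and fall back to all memory nodes when the task has no mempolicy:

NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

if (nodes_allowed && !init_nodemask_of_mempolicy(nodes_allowed)) {
        NODEMASK_FREE(nodes_allowed);
        nodes_allowed = &node_states[N_MEMORY];
}
/* ... distribute per-node pool pages over nodes_allowed ... */
if (nodes_allowed != &node_states[N_MEMORY])
        NODEMASK_FREE(nodes_allowed);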
00ac59ad 1902#endif
5da7ca86 1903
6f48d0eb
DR
1904/*
1905 * mempolicy_nodemask_intersects
1906 *
1907 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1908 * policy. Otherwise, check for intersection between mask and the policy
 1909 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1910 * policy, always return true since it may allocate elsewhere on fallback.
1911 *
1912 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1913 */
1914bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1915 const nodemask_t *mask)
1916{
1917 struct mempolicy *mempolicy;
1918 bool ret = true;
1919
1920 if (!mask)
1921 return ret;
1922 task_lock(tsk);
1923 mempolicy = tsk->mempolicy;
1924 if (!mempolicy)
1925 goto out;
1926
1927 switch (mempolicy->mode) {
1928 case MPOL_PREFERRED:
1929 /*
 1930 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
 1931 * allocate from; allocations may fall back to other nodes under OOM.
1932 * Thus, it's possible for tsk to have allocated memory from
1933 * nodes in mask.
1934 */
1935 break;
1936 case MPOL_BIND:
1937 case MPOL_INTERLEAVE:
1938 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1939 break;
1940 default:
1941 BUG();
1942 }
1943out:
1944 task_unlock(tsk);
1945 return ret;
1946}
1947
1da177e4
LT
1948/* Allocate a page in interleaved policy.
1949 Own path because it needs to do special accounting. */
662f3a0b
AK
1950static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1951 unsigned nid)
1da177e4
LT
1952{
1953 struct zonelist *zl;
1954 struct page *page;
1955
0e88460d 1956 zl = node_zonelist(nid, gfp);
1da177e4 1957 page = __alloc_pages(gfp, order, zl);
dd1a239f 1958 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
ca889e6c 1959 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1da177e4
LT
1960 return page;
1961}
1962
1963/**
0bbbc0b3 1964 * alloc_pages_vma - Allocate a page for a VMA.
1da177e4
LT
1965 *
1966 * @gfp:
1967 * %GFP_USER user allocation.
1968 * %GFP_KERNEL kernel allocations,
1969 * %GFP_HIGHMEM highmem/user allocations,
1970 * %GFP_FS allocation should not call back into a file system.
1971 * %GFP_ATOMIC don't sleep.
1972 *
0bbbc0b3 1973 * @order: Order of the GFP allocation.
1da177e4
LT
1974 * @vma: Pointer to VMA or NULL if not available.
1975 * @addr: Virtual Address of the allocation. Must be inside the VMA.
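 * @node: Which node to prefer for the allocation (modulo the policy).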
1976 *
1977 * This function allocates a page from the kernel page pool and applies
1978 * a NUMA policy associated with the VMA or the current process.
1979 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1980 * mm_struct of the VMA to prevent it from going away. Should be used for
1981 * all allocations for pages that will be mapped into
1982 * user space. Returns NULL when no page can be allocated.
1983 *
 1984 * Should be called with the mmap_sem of the vma held.
1985 */
1986struct page *
0bbbc0b3 1987alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2f5f9486 1988 unsigned long addr, int node)
1da177e4 1989{
cc9a6c87 1990 struct mempolicy *pol;
c0ff7453 1991 struct page *page;
cc9a6c87
MG
1992 unsigned int cpuset_mems_cookie;
1993
1994retry_cpuset:
1995 pol = get_vma_policy(current, vma, addr);
1996 cpuset_mems_cookie = get_mems_allowed();
1da177e4 1997
45c4745a 1998 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1da177e4 1999 unsigned nid;
5da7ca86 2000
8eac563c 2001 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
52cd3b07 2002 mpol_cond_put(pol);
0bbbc0b3 2003 page = alloc_page_interleave(gfp, order, nid);
cc9a6c87
MG
2004 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2005 goto retry_cpuset;
2006
c0ff7453 2007 return page;
1da177e4 2008 }
212a0a6f
DR
2009 page = __alloc_pages_nodemask(gfp, order,
2010 policy_zonelist(gfp, pol, node),
0bbbc0b3 2011 policy_nodemask(gfp, pol));
212a0a6f
DR
2012 if (unlikely(mpol_needs_cond_ref(pol)))
2013 __mpol_put(pol);
cc9a6c87
MG
2014 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2015 goto retry_cpuset;
c0ff7453 2016 return page;
1da177e4
LT
2017}
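For the common order-0 case callers normally go through the alloc_page_vma() wrapper from <linux/gfp.h>, which supplies the local node id; a typical anonymous-fault allocation looks roughly like this (illustrative sketch, not from this file):

struct page *page;

page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (!page)
        return VM_FAULT_OOM;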
2018
2019/**
2020 * alloc_pages_current - Allocate pages.
2021 *
2022 * @gfp:
2023 * %GFP_USER user allocation,
2024 * %GFP_KERNEL kernel allocation,
2025 * %GFP_HIGHMEM highmem allocation,
2026 * %GFP_FS don't call back into a file system.
2027 * %GFP_ATOMIC don't sleep.
2028 * @order: Power of two of allocation size in pages. 0 is a single page.
2029 *
2030 * Allocate a page from the kernel page pool. When not in
 2031 * interrupt context, the current process' NUMA policy is applied.
2032 * Returns NULL when no page can be allocated.
2033 *
cf2a473c 2034 * Don't call cpuset_update_task_memory_state() unless
1da177e4
LT
2035 * 1) it's ok to take cpuset_sem (can WAIT), and
2036 * 2) allocating for current task (not interrupt).
2037 */
dd0fc66f 2038struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4 2039{
5606e387 2040 struct mempolicy *pol = get_task_policy(current);
c0ff7453 2041 struct page *page;
cc9a6c87 2042 unsigned int cpuset_mems_cookie;
1da177e4 2043
9b819d20 2044 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1da177e4 2045 pol = &default_policy;
52cd3b07 2046
cc9a6c87
MG
2047retry_cpuset:
2048 cpuset_mems_cookie = get_mems_allowed();
2049
52cd3b07
LS
2050 /*
2051 * No reference counting needed for current->mempolicy
2052 * nor system default_policy
2053 */
45c4745a 2054 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453
MX
2055 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2056 else
2057 page = __alloc_pages_nodemask(gfp, order,
5c4b4be3
AK
2058 policy_zonelist(gfp, pol, numa_node_id()),
2059 policy_nodemask(gfp, pol));
cc9a6c87
MG
2060
2061 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2062 goto retry_cpuset;
2063
c0ff7453 2064 return page;
1da177e4
LT
2065}
2066EXPORT_SYMBOL(alloc_pages_current);
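On CONFIG_NUMA kernels the generic alloc_pages() macro resolves to alloc_pages_current(), so ordinary allocations pick up the task policy transparently; for example (illustrative):

/* Four contiguous pages (order 2); placement follows the current
 * task's mempolicy unless __GFP_THISNODE is passed. */
struct page *pages = alloc_pages(GFP_KERNEL, 2);

if (pages)
        __free_pages(pages, 2);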
2067
4225399a 2068/*
846a16bf 2069 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
 2070 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2071 * with the mems_allowed returned by cpuset_mems_allowed(). This
2072 * keeps mempolicies cpuset relative after its cpuset moves. See
2073 * further kernel/cpuset.c update_nodemask().
708c1bbc
MX
2074 *
 2075 * current's mempolicy may be rebound by another task (the task that changes
 2076 * the cpuset's mems), so we needn't do the rebind work for the current task.
4225399a 2077 */
4225399a 2078
846a16bf
LS
2079/* Slow path of a mempolicy duplicate */
2080struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2081{
2082 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2083
2084 if (!new)
2085 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2086
2087 /* task's mempolicy is protected by alloc_lock */
2088 if (old == current->mempolicy) {
2089 task_lock(current);
2090 *new = *old;
2091 task_unlock(current);
2092 } else
2093 *new = *old;
2094
99ee4ca7 2095 rcu_read_lock();
4225399a
PJ
2096 if (current_cpuset_is_being_rebound()) {
2097 nodemask_t mems = cpuset_mems_allowed(current);
708c1bbc
MX
2098 if (new->flags & MPOL_F_REBINDING)
2099 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2100 else
2101 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
4225399a 2102 }
99ee4ca7 2103 rcu_read_unlock();
1da177e4 2104 atomic_set(&new->refcnt, 1);
1da177e4
LT
2105 return new;
2106}
2107
2108/* Slow path of a mempolicy comparison */
fcfb4dcc 2109bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2110{
2111 if (!a || !b)
fcfb4dcc 2112 return false;
45c4745a 2113 if (a->mode != b->mode)
fcfb4dcc 2114 return false;
19800502 2115 if (a->flags != b->flags)
fcfb4dcc 2116 return false;
19800502
BL
2117 if (mpol_store_user_nodemask(a))
2118 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2119 return false;
19800502 2120
45c4745a 2121 switch (a->mode) {
19770b32
MG
2122 case MPOL_BIND:
2123 /* Fall through */
1da177e4 2124 case MPOL_INTERLEAVE:
fcfb4dcc 2125 return !!nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 2126 case MPOL_PREFERRED:
75719661 2127 return a->v.preferred_node == b->v.preferred_node;
1da177e4
LT
2128 default:
2129 BUG();
fcfb4dcc 2130 return false;
1da177e4
LT
2131 }
2132}
2133
1da177e4
LT
2134/*
2135 * Shared memory backing store policy support.
2136 *
2137 * Remember policies even when nobody has shared memory mapped.
2138 * The policies are kept in Red-Black tree linked from the inode.
2139 * They are protected by the sp->lock spinlock, which should be held
2140 * for any accesses to the tree.
2141 */
2142
2143/* lookup first element intersecting start-end */
42288fe3 2144/* Caller holds sp->lock */
1da177e4
LT
2145static struct sp_node *
2146sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2147{
2148 struct rb_node *n = sp->root.rb_node;
2149
2150 while (n) {
2151 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2152
2153 if (start >= p->end)
2154 n = n->rb_right;
2155 else if (end <= p->start)
2156 n = n->rb_left;
2157 else
2158 break;
2159 }
2160 if (!n)
2161 return NULL;
2162 for (;;) {
2163 struct sp_node *w = NULL;
2164 struct rb_node *prev = rb_prev(n);
2165 if (!prev)
2166 break;
2167 w = rb_entry(prev, struct sp_node, nd);
2168 if (w->end <= start)
2169 break;
2170 n = prev;
2171 }
2172 return rb_entry(n, struct sp_node, nd);
2173}
2174
2175/* Insert a new shared policy into the list. */
2176/* Caller holds sp->lock */
2177static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2178{
2179 struct rb_node **p = &sp->root.rb_node;
2180 struct rb_node *parent = NULL;
2181 struct sp_node *nd;
2182
2183 while (*p) {
2184 parent = *p;
2185 nd = rb_entry(parent, struct sp_node, nd);
2186 if (new->start < nd->start)
2187 p = &(*p)->rb_left;
2188 else if (new->end > nd->end)
2189 p = &(*p)->rb_right;
2190 else
2191 BUG();
2192 }
2193 rb_link_node(&new->nd, parent, p);
2194 rb_insert_color(&new->nd, &sp->root);
140d5a49 2195 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2196 new->policy ? new->policy->mode : 0);
1da177e4
LT
2197}
2198
2199/* Find shared policy intersecting idx */
2200struct mempolicy *
2201mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2202{
2203 struct mempolicy *pol = NULL;
2204 struct sp_node *sn;
2205
2206 if (!sp->root.rb_node)
2207 return NULL;
42288fe3 2208 spin_lock(&sp->lock);
1da177e4
LT
2209 sn = sp_lookup(sp, idx, idx+1);
2210 if (sn) {
2211 mpol_get(sn->policy);
2212 pol = sn->policy;
2213 }
42288fe3 2214 spin_unlock(&sp->lock);
1da177e4
LT
2215 return pol;
2216}
2217
63f74ca2
KM
2218static void sp_free(struct sp_node *n)
2219{
2220 mpol_put(n->policy);
2221 kmem_cache_free(sn_cache, n);
2222}
2223
771fb4d8
LS
2224/**
2225 * mpol_misplaced - check whether current page node is valid in policy
2226 *
2227 * @page - page to be checked
2228 * @vma - vm area where page mapped
2229 * @addr - virtual address where page mapped
2230 *
2231 * Lookup current policy node id for vma,addr and "compare to" page's
2232 * node id.
2233 *
2234 * Returns:
2235 * -1 - not misplaced, page is in the right node
2236 * node - node id where the page should be
2237 *
2238 * Policy determination "mimics" alloc_page_vma().
2239 * Called from fault path where we know the vma and faulting address.
2240 */
2241int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2242{
2243 struct mempolicy *pol;
2244 struct zone *zone;
2245 int curnid = page_to_nid(page);
2246 unsigned long pgoff;
2247 int polnid = -1;
2248 int ret = -1;
2249
2250 BUG_ON(!vma);
2251
2252 pol = get_vma_policy(current, vma, addr);
2253 if (!(pol->flags & MPOL_F_MOF))
2254 goto out;
2255
2256 switch (pol->mode) {
2257 case MPOL_INTERLEAVE:
2258 BUG_ON(addr >= vma->vm_end);
2259 BUG_ON(addr < vma->vm_start);
2260
2261 pgoff = vma->vm_pgoff;
2262 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2263 polnid = offset_il_node(pol, vma, pgoff);
2264 break;
2265
2266 case MPOL_PREFERRED:
2267 if (pol->flags & MPOL_F_LOCAL)
2268 polnid = numa_node_id();
2269 else
2270 polnid = pol->v.preferred_node;
2271 break;
2272
2273 case MPOL_BIND:
2274 /*
2275 * allows binding to multiple nodes.
2276 * use current page if in policy nodemask,
2277 * else select nearest allowed node, if any.
2278 * If no allowed nodes, use current [!misplaced].
2279 */
2280 if (node_isset(curnid, pol->v.nodes))
2281 goto out;
2282 (void)first_zones_zonelist(
2283 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2284 gfp_zone(GFP_HIGHUSER),
2285 &pol->v.nodes, &zone);
2286 polnid = zone->node;
2287 break;
2288
2289 default:
2290 BUG();
2291 }
5606e387
MG
2292
2293 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2
MG
2294 if (pol->flags & MPOL_F_MORON) {
2295 int last_nid;
2296
5606e387
MG
2297 polnid = numa_node_id();
2298
e42c8ff2
MG
2299 /*
2300 * Multi-stage node selection is used in conjunction
2301 * with a periodic migration fault to build a temporal
2302 * task<->page relation. By using a two-stage filter we
2303 * remove short/unlikely relations.
2304 *
2305 * Using P(p) ~ n_p / n_t as per frequentist
2306 * probability, we can equate a task's usage of a
2307 * particular page (n_p) per total usage of this
2308 * page (n_t) (in a given time-span) to a probability.
2309 *
2310 * Our periodic faults will sample this probability and
2311 * getting the same result twice in a row, given these
2312 * samples are fully independent, is then given by
2313 * P(n)^2, provided our sample period is sufficiently
2314 * short compared to the usage pattern.
2315 *
 2316 * This quadratic squishes small probabilities, making
2317 * it less likely we act on an unlikely task<->page
2318 * relation.
2319 */
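		/*
		 * Illustrative numbers (not from the original source): a task
		 * responsible for ~70% of the faults on this page passes the
		 * two-fault filter with probability ~0.7^2 = 0.49, while a
		 * task touching it only ~20% of the time passes with
		 * probability ~0.04, so short-lived sharers rarely trigger
		 * a migration.
		 */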
22b751c3 2320 last_nid = page_nid_xchg_last(page, polnid);
e42c8ff2
MG
2321 if (last_nid != polnid)
2322 goto out;
2323 }
2324
771fb4d8
LS
2325 if (curnid != polnid)
2326 ret = polnid;
2327out:
2328 mpol_cond_put(pol);
2329
2330 return ret;
2331}
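A minimal sketch of the fault-path contract (the caller shown is illustrative; locking, page reference handling and the exact migration helper signature vary by kernel version):

int target_nid;

target_nid = mpol_misplaced(page, vma, addr);
if (target_nid != -1) {
        /* page is on the "wrong" node for the policy; the NUMA
         * balancing fault handler would try to move it, e.g. via
         * migrate_misplaced_page(page, target_nid). */
}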
2332
1da177e4
LT
2333static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2334{
140d5a49 2335 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 2336 rb_erase(&n->nd, &sp->root);
63f74ca2 2337 sp_free(n);
1da177e4
LT
2338}
2339
42288fe3
MG
2340static void sp_node_init(struct sp_node *node, unsigned long start,
2341 unsigned long end, struct mempolicy *pol)
2342{
2343 node->start = start;
2344 node->end = end;
2345 node->policy = pol;
2346}
2347
dbcb0f19
AB
2348static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2349 struct mempolicy *pol)
1da177e4 2350{
869833f2
KM
2351 struct sp_node *n;
2352 struct mempolicy *newpol;
1da177e4 2353
869833f2 2354 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2355 if (!n)
2356 return NULL;
869833f2
KM
2357
2358 newpol = mpol_dup(pol);
2359 if (IS_ERR(newpol)) {
2360 kmem_cache_free(sn_cache, n);
2361 return NULL;
2362 }
2363 newpol->flags |= MPOL_F_SHARED;
42288fe3 2364 sp_node_init(n, start, end, newpol);
869833f2 2365
1da177e4
LT
2366 return n;
2367}
2368
2369/* Replace a policy range. */
2370static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2371 unsigned long end, struct sp_node *new)
2372{
b22d127a 2373 struct sp_node *n;
42288fe3
MG
2374 struct sp_node *n_new = NULL;
2375 struct mempolicy *mpol_new = NULL;
b22d127a 2376 int ret = 0;
1da177e4 2377
42288fe3
MG
2378restart:
2379 spin_lock(&sp->lock);
1da177e4
LT
2380 n = sp_lookup(sp, start, end);
2381 /* Take care of old policies in the same range. */
2382 while (n && n->start < end) {
2383 struct rb_node *next = rb_next(&n->nd);
2384 if (n->start >= start) {
2385 if (n->end <= end)
2386 sp_delete(sp, n);
2387 else
2388 n->start = end;
2389 } else {
2390 /* Old policy spanning whole new range. */
2391 if (n->end > end) {
42288fe3
MG
2392 if (!n_new)
2393 goto alloc_new;
2394
2395 *mpol_new = *n->policy;
2396 atomic_set(&mpol_new->refcnt, 1);
7880639c 2397 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2398 n->end = start;
5ca39575 2399 sp_insert(sp, n_new);
42288fe3
MG
2400 n_new = NULL;
2401 mpol_new = NULL;
1da177e4
LT
2402 break;
2403 } else
2404 n->end = start;
2405 }
2406 if (!next)
2407 break;
2408 n = rb_entry(next, struct sp_node, nd);
2409 }
2410 if (new)
2411 sp_insert(sp, new);
42288fe3
MG
2412 spin_unlock(&sp->lock);
2413 ret = 0;
2414
2415err_out:
2416 if (mpol_new)
2417 mpol_put(mpol_new);
2418 if (n_new)
2419 kmem_cache_free(sn_cache, n_new);
2420
b22d127a 2421 return ret;
42288fe3
MG
2422
2423alloc_new:
2424 spin_unlock(&sp->lock);
2425 ret = -ENOMEM;
2426 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2427 if (!n_new)
2428 goto err_out;
2429 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2430 if (!mpol_new)
2431 goto err_out;
2432 goto restart;
1da177e4
LT
2433}
2434
71fe804b
LS
2435/**
2436 * mpol_shared_policy_init - initialize shared policy for inode
2437 * @sp: pointer to inode shared policy
2438 * @mpol: struct mempolicy to install
2439 *
2440 * Install non-NULL @mpol in inode's shared policy rb-tree.
2441 * On entry, the current task has a reference on a non-NULL @mpol.
2442 * This must be released on exit.
4bfc4495 2443 * This is called during get_inode() calls, so we can use GFP_KERNEL.
71fe804b
LS
2444 */
2445void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2446{
58568d2a
MX
2447 int ret;
2448
71fe804b 2449 sp->root = RB_ROOT; /* empty tree == default mempolicy */
42288fe3 2450 spin_lock_init(&sp->lock);
71fe804b
LS
2451
2452 if (mpol) {
2453 struct vm_area_struct pvma;
2454 struct mempolicy *new;
4bfc4495 2455 NODEMASK_SCRATCH(scratch);
71fe804b 2456
4bfc4495 2457 if (!scratch)
5c0c1654 2458 goto put_mpol;
71fe804b
LS
2459 /* contextualize the tmpfs mount point mempolicy */
2460 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2461 if (IS_ERR(new))
0cae3457 2462 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2463
2464 task_lock(current);
4bfc4495 2465 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2466 task_unlock(current);
15d77835 2467 if (ret)
5c0c1654 2468 goto put_new;
71fe804b
LS
2469
2470 /* Create pseudo-vma that contains just the policy */
2471 memset(&pvma, 0, sizeof(struct vm_area_struct));
2472 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2473 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2474
5c0c1654 2475put_new:
71fe804b 2476 mpol_put(new); /* drop initial ref */
0cae3457 2477free_scratch:
4bfc4495 2478 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2479put_mpol:
2480 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2481 }
2482}
2483
1da177e4
LT
2484int mpol_set_shared_policy(struct shared_policy *info,
2485 struct vm_area_struct *vma, struct mempolicy *npol)
2486{
2487 int err;
2488 struct sp_node *new = NULL;
2489 unsigned long sz = vma_pages(vma);
2490
028fec41 2491 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2492 vma->vm_pgoff,
45c4745a 2493 sz, npol ? npol->mode : -1,
028fec41 2494 npol ? npol->flags : -1,
00ef2d2f 2495 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2496
2497 if (npol) {
2498 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2499 if (!new)
2500 return -ENOMEM;
2501 }
2502 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2503 if (err && new)
63f74ca2 2504 sp_free(new);
1da177e4
LT
2505 return err;
2506}
2507
2508/* Free a backing policy store on inode delete. */
2509void mpol_free_shared_policy(struct shared_policy *p)
2510{
2511 struct sp_node *n;
2512 struct rb_node *next;
2513
2514 if (!p->root.rb_node)
2515 return;
42288fe3 2516 spin_lock(&p->lock);
1da177e4
LT
2517 next = rb_first(&p->root);
2518 while (next) {
2519 n = rb_entry(next, struct sp_node, nd);
2520 next = rb_next(&n->nd);
63f74ca2 2521 sp_delete(p, n);
1da177e4 2522 }
42288fe3 2523 spin_unlock(&p->lock);
1da177e4
LT
2524}
2525
1a687c2e
MG
2526#ifdef CONFIG_NUMA_BALANCING
2527static bool __initdata numabalancing_override;
2528
2529static void __init check_numabalancing_enable(void)
2530{
2531 bool numabalancing_default = false;
2532
2533 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2534 numabalancing_default = true;
2535
2536 if (nr_node_ids > 1 && !numabalancing_override) {
2537 printk(KERN_INFO "Enabling automatic NUMA balancing. "
2538 "Configure with numa_balancing= or sysctl");
2539 set_numabalancing_state(numabalancing_default);
2540 }
2541}
2542
2543static int __init setup_numabalancing(char *str)
2544{
2545 int ret = 0;
2546 if (!str)
2547 goto out;
2548 numabalancing_override = true;
2549
2550 if (!strcmp(str, "enable")) {
2551 set_numabalancing_state(true);
2552 ret = 1;
2553 } else if (!strcmp(str, "disable")) {
2554 set_numabalancing_state(false);
2555 ret = 1;
2556 }
2557out:
2558 if (!ret)
2559 printk(KERN_WARNING "Unable to parse numa_balancing=\n");
2560
2561 return ret;
2562}
2563__setup("numa_balancing=", setup_numabalancing);
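In practice this means automatic NUMA balancing can be forced on or off from the kernel command line, overriding the Kconfig default; for example booting with

        numa_balancing=disable

turns it off, numa_balancing=enable turns it on, and any other value makes setup_numabalancing() return 0 so the "Unable to parse" warning above is printed.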
2564#else
2565static inline void __init check_numabalancing_enable(void)
2566{
2567}
2568#endif /* CONFIG_NUMA_BALANCING */
2569
1da177e4
LT
2570/* assumes fs == KERNEL_DS */
2571void __init numa_policy_init(void)
2572{
b71636e2
PM
2573 nodemask_t interleave_nodes;
2574 unsigned long largest = 0;
2575 int nid, prefer = 0;
2576
1da177e4
LT
2577 policy_cache = kmem_cache_create("numa_policy",
2578 sizeof(struct mempolicy),
20c2df83 2579 0, SLAB_PANIC, NULL);
1da177e4
LT
2580
2581 sn_cache = kmem_cache_create("shared_policy_node",
2582 sizeof(struct sp_node),
20c2df83 2583 0, SLAB_PANIC, NULL);
1da177e4 2584
5606e387
MG
2585 for_each_node(nid) {
2586 preferred_node_policy[nid] = (struct mempolicy) {
2587 .refcnt = ATOMIC_INIT(1),
2588 .mode = MPOL_PREFERRED,
2589 .flags = MPOL_F_MOF | MPOL_F_MORON,
2590 .v = { .preferred_node = nid, },
2591 };
2592 }
2593
b71636e2
PM
2594 /*
2595 * Set interleaving policy for system init. Interleaving is only
2596 * enabled across suitably sized nodes (default is >= 16MB), or
2597 * fall back to the largest node if they're all smaller.
2598 */
2599 nodes_clear(interleave_nodes);
01f13bd6 2600 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2601 unsigned long total_pages = node_present_pages(nid);
2602
2603 /* Preserve the largest node */
2604 if (largest < total_pages) {
2605 largest = total_pages;
2606 prefer = nid;
2607 }
2608
2609 /* Interleave this node? */
2610 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2611 node_set(nid, interleave_nodes);
2612 }
2613
2614 /* All too small, use the largest */
2615 if (unlikely(nodes_empty(interleave_nodes)))
2616 node_set(prefer, interleave_nodes);
1da177e4 2617
028fec41 2618 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
1da177e4 2619 printk(KERN_ERR "numa_policy_init: interleaving failed\n");
1a687c2e
MG
2620
2621 check_numabalancing_enable();
1da177e4
LT
2622}
2623
8bccd85f 2624/* Reset policy of current process to default */
1da177e4
LT
2625void numa_default_policy(void)
2626{
028fec41 2627 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2628}
68860ec1 2629
095f1fc4
LS
2630/*
2631 * Parse and format mempolicy from/to strings
2632 */
2633
1a75a6c8 2634/*
f2a07f40 2635 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
1a75a6c8 2636 */
345ace9c
LS
2637static const char * const policy_modes[] =
2638{
2639 [MPOL_DEFAULT] = "default",
2640 [MPOL_PREFERRED] = "prefer",
2641 [MPOL_BIND] = "bind",
2642 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2643 [MPOL_LOCAL] = "local",
345ace9c 2644};
1a75a6c8 2645
095f1fc4
LS
2646
2647#ifdef CONFIG_TMPFS
2648/**
f2a07f40 2649 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2650 * @str: string containing mempolicy to parse
71fe804b 2651 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
2652 *
2653 * Format of input:
2654 * <mode>[=<flags>][:<nodelist>]
2655 *
71fe804b 2656 * On success, returns 0, else 1
095f1fc4 2657 */
a7a88b23 2658int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2659{
71fe804b 2660 struct mempolicy *new = NULL;
b4652e84 2661 unsigned short mode;
f2a07f40 2662 unsigned short mode_flags;
71fe804b 2663 nodemask_t nodes;
095f1fc4
LS
2664 char *nodelist = strchr(str, ':');
2665 char *flags = strchr(str, '=');
095f1fc4
LS
2666 int err = 1;
2667
2668 if (nodelist) {
2669 /* NUL-terminate mode or flags string */
2670 *nodelist++ = '\0';
71fe804b 2671 if (nodelist_parse(nodelist, nodes))
095f1fc4 2672 goto out;
01f13bd6 2673 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 2674 goto out;
71fe804b
LS
2675 } else
2676 nodes_clear(nodes);
2677
095f1fc4
LS
2678 if (flags)
2679 *flags++ = '\0'; /* terminate mode string */
2680
479e2802 2681 for (mode = 0; mode < MPOL_MAX; mode++) {
345ace9c 2682 if (!strcmp(str, policy_modes[mode])) {
095f1fc4
LS
2683 break;
2684 }
2685 }
a720094d 2686 if (mode >= MPOL_MAX)
095f1fc4
LS
2687 goto out;
2688
71fe804b 2689 switch (mode) {
095f1fc4 2690 case MPOL_PREFERRED:
71fe804b
LS
2691 /*
2692 * Insist on a nodelist of one node only
2693 */
095f1fc4
LS
2694 if (nodelist) {
2695 char *rest = nodelist;
2696 while (isdigit(*rest))
2697 rest++;
926f2ae0
KM
2698 if (*rest)
2699 goto out;
095f1fc4
LS
2700 }
2701 break;
095f1fc4
LS
2702 case MPOL_INTERLEAVE:
2703 /*
2704 * Default to online nodes with memory if no nodelist
2705 */
2706 if (!nodelist)
01f13bd6 2707 nodes = node_states[N_MEMORY];
3f226aa1 2708 break;
71fe804b 2709 case MPOL_LOCAL:
3f226aa1 2710 /*
71fe804b 2711 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2712 */
71fe804b 2713 if (nodelist)
3f226aa1 2714 goto out;
71fe804b 2715 mode = MPOL_PREFERRED;
3f226aa1 2716 break;
413b43de
RT
2717 case MPOL_DEFAULT:
2718 /*
 2719 * Insist on an empty nodelist
2720 */
2721 if (!nodelist)
2722 err = 0;
2723 goto out;
d69b2e63
KM
2724 case MPOL_BIND:
2725 /*
2726 * Insist on a nodelist
2727 */
2728 if (!nodelist)
2729 goto out;
095f1fc4
LS
2730 }
2731
71fe804b 2732 mode_flags = 0;
095f1fc4
LS
2733 if (flags) {
2734 /*
2735 * Currently, we only support two mutually exclusive
2736 * mode flags.
2737 */
2738 if (!strcmp(flags, "static"))
71fe804b 2739 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2740 else if (!strcmp(flags, "relative"))
71fe804b 2741 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2742 else
926f2ae0 2743 goto out;
095f1fc4 2744 }
71fe804b
LS
2745
2746 new = mpol_new(mode, mode_flags, &nodes);
2747 if (IS_ERR(new))
926f2ae0
KM
2748 goto out;
2749
f2a07f40
HD
2750 /*
2751 * Save nodes for mpol_to_str() to show the tmpfs mount options
2752 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2753 */
2754 if (mode != MPOL_PREFERRED)
2755 new->v.nodes = nodes;
2756 else if (nodelist)
2757 new->v.preferred_node = first_node(nodes);
2758 else
2759 new->flags |= MPOL_F_LOCAL;
2760
2761 /*
2762 * Save nodes for contextualization: this will be used to "clone"
2763 * the mempolicy in a specific context [cpuset] at a later time.
2764 */
2765 new->w.user_nodemask = nodes;
2766
926f2ae0 2767 err = 0;
71fe804b 2768
095f1fc4
LS
2769out:
2770 /* Restore string for error message */
2771 if (nodelist)
2772 *--nodelist = ':';
2773 if (flags)
2774 *--flags = '=';
71fe804b
LS
2775 if (!err)
2776 *mpol = new;
095f1fc4
LS
2777 return err;
2778}
2779#endif /* CONFIG_TMPFS */
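An illustrative parse (values hypothetical; note the function writes NUL bytes into the string, so it must live in a writable buffer, and the nodes named must be present in node_states[N_MEMORY]):

struct mempolicy *mpol = NULL;
char buf[] = "interleave=static:0-3";   /* e.g. from a tmpfs "mpol=" option */

if (!mpol_parse_str(buf, &mpol)) {
        /* success: mpol->mode == MPOL_INTERLEAVE, mpol->flags includes
         * MPOL_F_STATIC_NODES and mpol->v.nodes covers nodes 0-3 */
        mpol_put(mpol);
}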
2780
71fe804b
LS
2781/**
2782 * mpol_to_str - format a mempolicy structure for printing
2783 * @buffer: to contain formatted mempolicy string
2784 * @maxlen: length of @buffer
2785 * @pol: pointer to mempolicy to be formatted
71fe804b 2786 *
1a75a6c8
CL
2787 * Convert a mempolicy into a string.
2788 * Returns the number of characters in buffer (if positive)
2789 * or an error (negative)
2790 */
a7a88b23 2791int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
2792{
2793 char *p = buffer;
2794 int l;
2795 nodemask_t nodes;
bea904d5 2796 unsigned short mode;
f5b087b5 2797 unsigned short flags = pol ? pol->flags : 0;
1a75a6c8 2798
2291990a
LS
2799 /*
2800 * Sanity check: room for longest mode, flag and some nodes
2801 */
2802 VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2803
bea904d5
LS
2804 if (!pol || pol == &default_policy)
2805 mode = MPOL_DEFAULT;
2806 else
2807 mode = pol->mode;
2808
1a75a6c8
CL
2809 switch (mode) {
2810 case MPOL_DEFAULT:
2811 nodes_clear(nodes);
2812 break;
2813
2814 case MPOL_PREFERRED:
2815 nodes_clear(nodes);
fc36b8d3 2816 if (flags & MPOL_F_LOCAL)
f2a07f40 2817 mode = MPOL_LOCAL;
53f2556b 2818 else
fc36b8d3 2819 node_set(pol->v.preferred_node, nodes);
1a75a6c8
CL
2820 break;
2821
2822 case MPOL_BIND:
19770b32 2823 /* Fall through */
1a75a6c8 2824 case MPOL_INTERLEAVE:
f2a07f40 2825 nodes = pol->v.nodes;
1a75a6c8
CL
2826 break;
2827
2828 default:
80de7c31 2829 return -EINVAL;
1a75a6c8
CL
2830 }
2831
345ace9c 2832 l = strlen(policy_modes[mode]);
53f2556b
LS
2833 if (buffer + maxlen < p + l + 1)
2834 return -ENOSPC;
1a75a6c8 2835
345ace9c 2836 strcpy(p, policy_modes[mode]);
1a75a6c8
CL
2837 p += l;
2838
fc36b8d3 2839 if (flags & MPOL_MODE_FLAGS) {
f5b087b5
DR
2840 if (buffer + maxlen < p + 2)
2841 return -ENOSPC;
2842 *p++ = '=';
2843
2291990a
LS
2844 /*
2845 * Currently, the only defined flags are mutually exclusive
2846 */
f5b087b5 2847 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
2848 p += snprintf(p, buffer + maxlen - p, "static");
2849 else if (flags & MPOL_F_RELATIVE_NODES)
2850 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
2851 }
2852
1a75a6c8
CL
2853 if (!nodes_empty(nodes)) {
2854 if (buffer + maxlen < p + 2)
2855 return -ENOSPC;
095f1fc4 2856 *p++ = ':';
1a75a6c8
CL
2857 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2858 }
2859 return p - buffer;
2860}
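Going the other way, given a struct mempolicy *mpol such as the interleave policy in the parse example above, formatting it back reproduces the mount-option syntax (sketch, output shown is illustrative):

char buf[64];
int len;

len = mpol_to_str(buf, sizeof(buf), mpol);
if (len > 0)
        pr_info("mempolicy: %s\n", buf);        /* e.g. "interleave=static:0-3" */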