/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		preferred_node;	/* preferred */
		nodemask_t	nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
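
/*
 * Illustrative sketch (not part of the original header): roughly how the
 * fields above relate to the set_mempolicy(2) call that created the policy.
 * The values shown are an assumption for a simple interleave policy with no
 * MPOL_F_STATIC_NODES/MPOL_F_RELATIVE_NODES mode flags:
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, maxnode);
 *		-> mode    = MPOL_INTERLEAVE
 *		-> flags   = 0
 *		-> v.nodes = mask, restricted to the caller's cpuset
 *		-> w.cpuset_mems_allowed = the cpuset's mems at creation time,
 *		   consulted later by mpol_rebind_*() when the cpuset changes
 */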

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
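
/*
 * Illustrative usage sketch (not part of the original header): policies
 * returned by lookup helpers such as get_vma_policy() only carry an extra
 * reference when they come out of a shared policy tree (MPOL_F_SHARED), so
 * callers pair the lookup with mpol_cond_put() rather than mpol_put():
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	... pick nodes / zonelist according to pol ...
 *	mpol_cond_put(pol);	// drops the ref only if MPOL_F_SHARED is set
 */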

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
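
/*
 * Illustrative usage sketch (not part of the original header): a private
 * copy made with mpol_dup() starts with refcnt == 1 and is dropped with
 * mpol_put() when the caller no longer needs it; __mpol_dup() reports
 * allocation failure via ERR_PTR():
 *
 *	struct mempolicy *new = mpol_dup(old);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	... use the private copy ...
 *	mpol_put(new);		// releases the mpol_dup() reference
 */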

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
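
/*
 * Illustrative sketch (not part of the original header): since the tree is
 * indexed in pages, shmem-style users convert a byte offset within the
 * object to a page index before looking up the policy.  "info" and
 * "offset" are assumed names here:
 *
 *	pgoff_t idx = offset >> PAGE_SHIFT;
 *	struct mempolicy *pol = mpol_shared_policy_lookup(&info->policy, idx);
 *	...
 *	mpol_cond_put(pol);	// shared lookups return a counted reference
 */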

struct mempolicy *get_vma_policy(struct task_struct *tsk,
		struct vm_area_struct *vma, unsigned long addr);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
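
/*
 * Illustrative sketch (not part of the original header): these helpers speak
 * the tmpfs "mpol=" mount-option format, e.g. "interleave:0-3" or "bind:0,2".
 * mpol_parse_str() parses (and modifies) the string in place and returns
 * non-zero on error, mirroring the !CONFIG_NUMA stub further down;
 * "option_string" is an assumed local:
 *
 *	struct mempolicy *mpol;
 *	if (mpol_parse_str(option_string, &mpol))
 *		return -EINVAL;
 */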

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP))
		return 0;
	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return 0;
	return 1;
}
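
/*
 * Illustrative usage sketch (not part of the original header): page
 * migration walkers typically skip mappings that fail this check before
 * queueing any pages, e.g. on behalf of do_migrate_pages():
 *
 *	if (!vma_migratable(vma))
 *		continue;	// leave VM_IO/VM_PFNMAP etc. alone
 */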

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
			const nodemask_t *mask)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	return 0;
}

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

#endif /* CONFIG_NUMA */
#endif