#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

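/*
 * Note (informal summary; kernel/cpuset.c holds the authoritative rules):
 * the inline wrappers below short-circuit to "allowed" while at most one
 * cpuset exists, so the common single-cpuset case costs only a load of
 * number_of_cpusets.  Otherwise they call the __cpuset_node_allowed_*()
 * helpers, where the _softwall variant is the more permissive check (it
 * may allow GFP_KERNEL allocations from an enclosing hardwalled ancestor
 * cpuset) and the _hardwall variant confines the allocation to the task's
 * own mems_allowed.
 */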
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

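/*
 * cpuset_memory_pressure_bump() is a fast-path wrapper: it only calls
 * __cpuset_memory_pressure_bump() when per-cpuset memory pressure
 * accounting has been enabled, so the common disabled case is a single
 * flag test.
 */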
#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *, void *);

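/*
 * Memory spreading (informal summary): when a cpuset enables its
 * memory_spread_page / memory_spread_slab flags, its tasks carry the
 * PF_SPREAD_PAGE / PF_SPREAD_SLAB flags.  Page-cache and slab allocations
 * check cpuset_do_page_mem_spread() / cpuset_do_slab_mem_spread() and, if
 * set, use cpuset_mem_spread_node() / cpuset_slab_spread_node() to rotate
 * allocations over the nodes in mems_allowed rather than preferring the
 * local node.
 */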
extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * get_mems_allowed() is required when making decisions involving
 * mems_allowed, such as during page allocation.  mems_allowed can be
 * updated in parallel, and depending on the new value an operation can
 * fail, potentially causing process failure.  A retry loop with
 * get_mems_allowed() and put_mems_allowed() prevents these artificial
 * failures.
 */
static inline unsigned int get_mems_allowed(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns false, the operation that took place after
 * get_mems_allowed() may have failed.  It is up to the caller to retry
 * the operation if appropriate.
 */
static inline bool put_mems_allowed(unsigned int seq)
{
	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
}

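/*
 * Illustrative usage sketch (not part of this header; alloc_pages() below
 * is just a stand-in for any operation that consults mems_allowed, and
 * gfp_mask/order are caller-supplied):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = get_mems_allowed();
 *		page = alloc_pages(gfp_mask, order);
 *	} while (!page && !put_mems_allowed(cpuset_mems_cookie));
 *
 * If mems_allowed changed while the allocation was in flight,
 * put_mems_allowed() returns false and the allocation is retried against
 * the updated mems_allowed.
 */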
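/*
 * Writer side of the mems_allowed seqcount: the update is done under
 * task_lock() and bracketed by write_seqcount_begin()/write_seqcount_end(),
 * so lockless readers using get_mems_allowed()/put_mems_allowed() can
 * detect a concurrent change and retry.
 */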
static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

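/*
 * With cpusets disabled, the API degenerates to stubs that behave as if
 * every task lived in a single all-encompassing cpuset: every possible CPU
 * and memory node is allowed, memory spreading is off, and sched-domain
 * rebuilds collapse to a single partition.
 */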
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int get_mems_allowed(void)
{
	return 0;
}

static inline bool put_mems_allowed(unsigned int seq)
{
	return true;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */