Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * kernel/cpuset.c | |
3 | * | |
4 | * Processor and Memory placement constraints for sets of tasks. | |
5 | * | |
6 | * Copyright (C) 2003 BULL SA. | |
029190c5 | 7 | * Copyright (C) 2004-2007 Silicon Graphics, Inc. |
8793d854 | 8 | * Copyright (C) 2006 Google, Inc |
1da177e4 LT |
9 | * |
10 | * Portions derived from Patrick Mochel's sysfs code. | |
11 | * sysfs is Copyright (c) 2001-3 Patrick Mochel | |
1da177e4 | 12 | * |
825a46af | 13 | * 2003-10-10 Written by Simon Derr. |
1da177e4 | 14 | * 2003-10-22 Updates by Stephen Hemminger. |
825a46af | 15 | * 2004 May-July Rework by Paul Jackson. |
8793d854 | 16 | * 2006 Rework by Paul Menage to use generic cgroups |
cf417141 MK |
17 | * 2008 Rework of the scheduler domains and CPU hotplug handling |
18 | * by Max Krasnyansky | |
1da177e4 LT |
19 | * |
20 | * This file is subject to the terms and conditions of the GNU General Public | |
21 | * License. See the file COPYING in the main directory of the Linux | |
22 | * distribution for more details. | |
23 | */ | |
24 | ||
1da177e4 LT |
25 | #include <linux/cpu.h> |
26 | #include <linux/cpumask.h> | |
27 | #include <linux/cpuset.h> | |
28 | #include <linux/err.h> | |
29 | #include <linux/errno.h> | |
30 | #include <linux/file.h> | |
31 | #include <linux/fs.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/interrupt.h> | |
34 | #include <linux/kernel.h> | |
35 | #include <linux/kmod.h> | |
36 | #include <linux/list.h> | |
68860ec1 | 37 | #include <linux/mempolicy.h> |
1da177e4 | 38 | #include <linux/mm.h> |
f481891f | 39 | #include <linux/memory.h> |
9984de1a | 40 | #include <linux/export.h> |
1da177e4 LT |
41 | #include <linux/mount.h> |
42 | #include <linux/namei.h> | |
43 | #include <linux/pagemap.h> | |
44 | #include <linux/proc_fs.h> | |
6b9c2603 | 45 | #include <linux/rcupdate.h> |
1da177e4 LT |
46 | #include <linux/sched.h> |
47 | #include <linux/seq_file.h> | |
22fb52dd | 48 | #include <linux/security.h> |
1da177e4 | 49 | #include <linux/slab.h> |
1da177e4 LT |
50 | #include <linux/spinlock.h> |
51 | #include <linux/stat.h> | |
52 | #include <linux/string.h> | |
53 | #include <linux/time.h> | |
54 | #include <linux/backing-dev.h> | |
55 | #include <linux/sort.h> | |
56 | ||
57 | #include <asm/uaccess.h> | |
60063497 | 58 | #include <linux/atomic.h> |
3d3f26a7 | 59 | #include <linux/mutex.h> |
956db3ca | 60 | #include <linux/cgroup.h> |
e44193d3 | 61 | #include <linux/wait.h> |
1da177e4 | 62 | |
97e37140 | 63 | struct static_key cpusets_pre_enable_key __read_mostly = STATIC_KEY_INIT_FALSE; |
664eedde | 64 | struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE; |
202f72d5 | 65 | |
3e0d98b9 PJ |
66 | /* See "Frequency meter" comments, below. */ |
67 | ||
68 | struct fmeter { | |
69 | int cnt; /* unprocessed events count */ | |
70 | int val; /* most recent output value */ | |
71 | time_t time; /* clock (secs) when val computed */ | |
72 | spinlock_t lock; /* guards read or write of above */ | |
73 | }; | |
74 | ||
1da177e4 | 75 | struct cpuset { |
8793d854 PM |
76 | struct cgroup_subsys_state css; |
77 | ||
1da177e4 | 78 | unsigned long flags; /* "unsigned long" so bitops work */ |
e2b9a3d7 | 79 | |
7e88291b LZ |
80 | /* |
81 | * On default hierarchy: | |
82 | * | |
83 | * The user-configured masks can only be changed by writing to | |
84 | * cpuset.cpus and cpuset.mems, and won't be limited by the | |
85 | * parent masks. | |
86 | * | |
87 | * The effective masks are the real masks that apply to the tasks | |
88 | * in the cpuset. They may be changed if the configured masks are | |
89 | * changed or hotplug happens. | |
90 | * | |
91 | * effective_mask == configured_mask & parent's effective_mask, | |
92 | * and if it ends up empty, it will inherit the parent's mask. | |
93 | * | |
94 | * | |
95 | * On legacy hierarchy: | |
96 | * | |
97 | * The user-configured masks are always the same as the effective masks. | |
98 | */ | |
99 | ||
e2b9a3d7 LZ |
100 | /* user-configured CPUs and Memory Nodes allowed to tasks */ |
101 | cpumask_var_t cpus_allowed; | |
cd55f3c2 | 102 | cpumask_var_t cpus_requested; |
e2b9a3d7 LZ |
103 | nodemask_t mems_allowed; |
104 | ||
105 | /* effective CPUs and Memory Nodes allowed to tasks */ | |
106 | cpumask_var_t effective_cpus; | |
107 | nodemask_t effective_mems; | |
1da177e4 | 108 | |
33ad801d LZ |
109 | /* |
110 | * These are the old Memory Nodes that tasks in this cpuset took on. | |
111 | * | |
112 | * - top_cpuset.old_mems_allowed is initialized to mems_allowed. | |
113 | * - A new cpuset's old_mems_allowed is initialized when some | |
114 | * task is moved into it. | |
115 | * - old_mems_allowed is used in cpuset_migrate_mm() when we change | |
116 | * cpuset.mems_allowed and have tasks' nodemask updated, and | |
117 | * then old_mems_allowed is updated to mems_allowed. | |
118 | */ | |
119 | nodemask_t old_mems_allowed; | |
120 | ||
3e0d98b9 | 121 | struct fmeter fmeter; /* memory_pressure filter */ |
029190c5 | 122 | |
452477fa TH |
123 | /* |
124 | * Tasks are being attached to this cpuset. Used to prevent | |
125 | * zeroing cpus/mems_allowed between ->can_attach() and ->attach(). | |
126 | */ | |
127 | int attach_in_progress; | |
128 | ||
029190c5 PJ |
129 | /* partition number for rebuild_sched_domains() */ |
130 | int pn; | |
956db3ca | 131 | |
1d3504fc HS |
132 | /* for custom sched domain */ |
133 | int relax_domain_level; | |
1da177e4 LT |
134 | }; |
135 | ||
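The effective-mask rule documented inside struct cpuset above is easiest to see with concrete values. Below is a minimal userspace sketch (editor's illustration with made-up masks, not kernel code) of effective = configured & parent_effective, including the fall-back to the parent's mask when the intersection is empty, as on the default hierarchy.

```c
/* Editor's sketch: plain unsigned longs stand in for cpumasks. */
#include <stdio.h>

static unsigned long effective_mask(unsigned long configured,
				    unsigned long parent_effective)
{
	unsigned long eff = configured & parent_effective;

	/* if the intersection is empty, inherit the parent's effective mask */
	return eff ? eff : parent_effective;
}

int main(void)
{
	unsigned long parent_eff = 0x0f;	/* parent may run on CPUs 0-3 */
	unsigned long child_cfg  = 0x3c;	/* child configured for CPUs 2-5 */
	unsigned long orphan_cfg = 0xc0;	/* child configured for CPUs 6-7 only */

	printf("child effective:  %#lx\n", effective_mask(child_cfg, parent_eff));	/* 0xc: CPUs 2-3 */
	printf("orphan effective: %#lx\n", effective_mask(orphan_cfg, parent_eff));	/* 0xf: inherited */
	return 0;
}
```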
a7c6d554 | 136 | static inline struct cpuset *css_cs(struct cgroup_subsys_state *css) |
8793d854 | 137 | { |
a7c6d554 | 138 | return css ? container_of(css, struct cpuset, css) : NULL; |
8793d854 PM |
139 | } |
140 | ||
141 | /* Retrieve the cpuset for a task */ | |
142 | static inline struct cpuset *task_cs(struct task_struct *task) | |
143 | { | |
073219e9 | 144 | return css_cs(task_css(task, cpuset_cgrp_id)); |
8793d854 | 145 | } |
8793d854 | 146 | |
c9710d80 | 147 | static inline struct cpuset *parent_cs(struct cpuset *cs) |
c431069f | 148 | { |
5c9d535b | 149 | return css_cs(cs->css.parent); |
c431069f TH |
150 | } |
151 | ||
b246272e DR |
152 | #ifdef CONFIG_NUMA |
153 | static inline bool task_has_mempolicy(struct task_struct *task) | |
154 | { | |
155 | return task->mempolicy; | |
156 | } | |
157 | #else | |
158 | static inline bool task_has_mempolicy(struct task_struct *task) | |
159 | { | |
160 | return false; | |
161 | } | |
162 | #endif | |
163 | ||
164 | ||
1da177e4 LT |
165 | /* bits in struct cpuset flags field */ |
166 | typedef enum { | |
efeb77b2 | 167 | CS_ONLINE, |
1da177e4 LT |
168 | CS_CPU_EXCLUSIVE, |
169 | CS_MEM_EXCLUSIVE, | |
78608366 | 170 | CS_MEM_HARDWALL, |
45b07ef3 | 171 | CS_MEMORY_MIGRATE, |
029190c5 | 172 | CS_SCHED_LOAD_BALANCE, |
825a46af PJ |
173 | CS_SPREAD_PAGE, |
174 | CS_SPREAD_SLAB, | |
1cac41cb | 175 | CS_FAMILY_BOOST, |
1da177e4 LT |
176 | } cpuset_flagbits_t; |
177 | ||
178 | /* convenient tests for these bits */ | |
c8acec90 | 179 | static inline bool is_cpuset_online(struct cpuset *cs) |
efeb77b2 | 180 | { |
c8acec90 | 181 | return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); |
efeb77b2 TH |
182 | } |
183 | ||
1da177e4 LT |
184 | static inline int is_cpu_exclusive(const struct cpuset *cs) |
185 | { | |
7b5b9ef0 | 186 | return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); |
1da177e4 LT |
187 | } |
188 | ||
189 | static inline int is_mem_exclusive(const struct cpuset *cs) | |
190 | { | |
7b5b9ef0 | 191 | return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); |
1da177e4 LT |
192 | } |
193 | ||
78608366 PM |
194 | static inline int is_mem_hardwall(const struct cpuset *cs) |
195 | { | |
196 | return test_bit(CS_MEM_HARDWALL, &cs->flags); | |
197 | } | |
198 | ||
029190c5 PJ |
199 | static inline int is_sched_load_balance(const struct cpuset *cs) |
200 | { | |
201 | return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); | |
202 | } | |
203 | ||
45b07ef3 PJ |
204 | static inline int is_memory_migrate(const struct cpuset *cs) |
205 | { | |
7b5b9ef0 | 206 | return test_bit(CS_MEMORY_MIGRATE, &cs->flags); |
45b07ef3 PJ |
207 | } |
208 | ||
825a46af PJ |
209 | static inline int is_spread_page(const struct cpuset *cs) |
210 | { | |
211 | return test_bit(CS_SPREAD_PAGE, &cs->flags); | |
212 | } | |
213 | ||
214 | static inline int is_spread_slab(const struct cpuset *cs) | |
215 | { | |
216 | return test_bit(CS_SPREAD_SLAB, &cs->flags); | |
217 | } | |
218 | ||
1da177e4 | 219 | static struct cpuset top_cpuset = { |
efeb77b2 TH |
220 | .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) | |
221 | (1 << CS_MEM_EXCLUSIVE)), | |
1da177e4 LT |
222 | }; |
223 | ||
1cac41cb MB |
224 | static inline int is_family_boost_enabled(const struct cpuset *cs) |
225 | { | |
226 | return test_bit(CS_FAMILY_BOOST, &cs->flags); | |
227 | } | |
228 | ||
ae8086ce TH |
229 | /** |
230 | * cpuset_for_each_child - traverse online children of a cpuset | |
231 | * @child_cs: loop cursor pointing to the current child | |
492eb21b | 232 | * @pos_css: used for iteration |
ae8086ce TH |
233 | * @parent_cs: target cpuset to walk children of |
234 | * | |
235 | * Walk @child_cs through the online children of @parent_cs. Must be used | |
236 | * with RCU read locked. | |
237 | */ | |
492eb21b TH |
238 | #define cpuset_for_each_child(child_cs, pos_css, parent_cs) \ |
239 | css_for_each_child((pos_css), &(parent_cs)->css) \ | |
240 | if (is_cpuset_online(((child_cs) = css_cs((pos_css))))) | |
ae8086ce | 241 | |
fc560a26 TH |
242 | /** |
243 | * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants | |
244 | * @des_cs: loop cursor pointing to the current descendant | |
492eb21b | 245 | * @pos_css: used for iteration |
fc560a26 TH |
246 | * @root_cs: target cpuset to walk descendants of |
247 | * | |
248 | * Walk @des_cs through the online descendants of @root_cs. Must be used | |
492eb21b | 249 | * with RCU read locked. The caller may modify @pos_css by calling |
bd8815a6 TH |
250 | * css_rightmost_descendant() to skip a subtree. @root_cs is included in the |
251 | * iteration and is the first node to be visited. |
fc560a26 | 252 | */ |
492eb21b TH |
253 | #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \ |
254 | css_for_each_descendant_pre((pos_css), &(root_cs)->css) \ | |
255 | if (is_cpuset_online(((des_cs) = css_cs((pos_css))))) | |
fc560a26 | 256 | |
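A minimal sketch of how the two iteration macros above are meant to be called (hypothetical helper, editor's illustration only; as the comments require, the walk sits inside rcu_read_lock()/rcu_read_unlock()):

```c
/* Editor's sketch of the expected calling convention; not kernel code. */
static void count_online_descendants(struct cpuset *root_cs)
{
	struct cgroup_subsys_state *pos_css;
	struct cpuset *cp;
	int n = 0;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs)
		n++;				/* @root_cs itself is visited first */
	rcu_read_unlock();

	pr_debug("cpuset has %d online descendants (including itself)\n", n);
}
```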
1da177e4 | 257 | /* |
8447a0fe VD |
258 | * There are two global locks guarding cpuset structures - cpuset_mutex and |
259 | * callback_lock. We also require taking task_lock() when dereferencing a | |
260 | * task's cpuset pointer. See "The task_lock() exception", at the end of this | |
261 | * comment. | |
5d21cc2d | 262 | * |
8447a0fe | 263 | * A task must hold both locks to modify cpusets. If a task holds |
5d21cc2d | 264 | * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it |
8447a0fe | 265 | * is the only task able to also acquire callback_lock and be able to |
5d21cc2d TH |
266 | * modify cpusets. It can perform various checks on the cpuset structure |
267 | * first, knowing nothing will change. It can also allocate memory while | |
268 | * just holding cpuset_mutex. While it is performing these checks, various | |
8447a0fe VD |
269 | * callback routines can briefly acquire callback_lock to query cpusets. |
270 | * Once it is ready to make the changes, it takes callback_lock, blocking | |
5d21cc2d | 271 | * everyone else. |
053199ed PJ |
272 | * |
273 | * Calls to the kernel memory allocator can not be made while holding | |
8447a0fe | 274 | * callback_lock, as that would risk double tripping on callback_lock |
053199ed PJ |
275 | * from one of the callbacks into the cpuset code from within |
276 | * __alloc_pages(). | |
277 | * | |
8447a0fe | 278 | * If a task is only holding callback_lock, then it has read-only |
053199ed PJ |
279 | * access to cpusets. |
280 | * | |
58568d2a MX |
281 | * Now, the task_struct fields mems_allowed and mempolicy may be changed |
282 | * by other tasks, so we use alloc_lock in the task_struct to protect |
283 | * them. | |
053199ed | 284 | * |
8447a0fe | 285 | * The cpuset_common_file_read() handlers only hold callback_lock across |
053199ed PJ |
286 | * small pieces of code, such as when reading out possibly multi-word |
287 | * cpumasks and nodemasks. | |
288 | * | |
2df167a3 PM |
289 | * Accessing a task's cpuset should be done in accordance with the |
290 | * guidelines for accessing subsystem state in kernel/cgroup.c | |
1da177e4 LT |
291 | */ |
292 | ||
5d21cc2d | 293 | static DEFINE_MUTEX(cpuset_mutex); |
8447a0fe | 294 | static DEFINE_SPINLOCK(callback_lock); |
4247bdc6 | 295 | |
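A condensed sketch of the write-side protocol described in the comment above (hypothetical helper, editor's illustration; the real modifiers further below, such as update_cpumask() and update_nodemask(), follow this shape): hold cpuset_mutex for the whole operation, validate and allocate while holding only the mutex, then take callback_lock just around the actual store.

```c
/* Editor's sketch of the locking pattern; not kernel code. */
static int modify_cpuset_sketch(struct cpuset *cs, const nodemask_t *newmems)
{
	int err = -EINVAL;

	mutex_lock(&cpuset_mutex);		/* may sleep; excludes other writers */

	if (nodes_empty(*newmems))		/* validate while nothing can change */
		goto out;

	spin_lock_irq(&callback_lock);		/* briefly blocks read-side callbacks */
	cs->mems_allowed = *newmems;
	spin_unlock_irq(&callback_lock);

	err = 0;
out:
	mutex_unlock(&cpuset_mutex);
	return err;
}
```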
fff4dc84 TH |
296 | static struct workqueue_struct *cpuset_migrate_mm_wq; |
297 | ||
3a5a6d0c TH |
298 | /* |
299 | * CPU / memory hotplug is handled asynchronously. | |
300 | */ | |
301 | static void cpuset_hotplug_workfn(struct work_struct *work); | |
3a5a6d0c TH |
302 | static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn); |
303 | ||
e44193d3 LZ |
304 | static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq); |
305 | ||
cf417141 MK |
306 | /* |
307 | * This is ugly, but preserves the userspace API for existing cpuset | |
8793d854 | 308 | * users. If someone tries to mount the "cpuset" filesystem, we |
cf417141 MK |
309 | * silently switch it to mount "cgroup" instead |
310 | */ | |
f7e83571 AV |
311 | static struct dentry *cpuset_mount(struct file_system_type *fs_type, |
312 | int flags, const char *unused_dev_name, void *data) | |
1da177e4 | 313 | { |
8793d854 | 314 | struct file_system_type *cgroup_fs = get_fs_type("cgroup"); |
f7e83571 | 315 | struct dentry *ret = ERR_PTR(-ENODEV); |
8793d854 PM |
316 | if (cgroup_fs) { |
317 | char mountopts[] = | |
318 | "cpuset,noprefix," | |
319 | "release_agent=/sbin/cpuset_release_agent"; | |
f7e83571 AV |
320 | ret = cgroup_fs->mount(cgroup_fs, flags, |
321 | unused_dev_name, mountopts); | |
8793d854 PM |
322 | put_filesystem(cgroup_fs); |
323 | } | |
324 | return ret; | |
1da177e4 LT |
325 | } |
326 | ||
327 | static struct file_system_type cpuset_fs_type = { | |
328 | .name = "cpuset", | |
f7e83571 | 329 | .mount = cpuset_mount, |
1da177e4 LT |
330 | }; |
331 | ||
1cac41cb MB |
332 | int is_top_task(struct task_struct *p) |
333 | { | |
334 | struct cpuset *cpuset_for_task; | |
335 | int ret; | |
336 | ||
337 | rcu_read_lock(); | |
338 | cpuset_for_task = task_cs(p); | |
339 | ret = is_family_boost_enabled(cpuset_for_task); | |
340 | rcu_read_unlock(); | |
341 | ||
342 | return ret; | |
343 | } | |
344 | EXPORT_SYMBOL(is_top_task); | |
345 | ||
1da177e4 | 346 | /* |
300ed6cb | 347 | * Return in pmask the portion of a cpuset's cpus_allowed that |
1da177e4 | 348 | * are online. If none are online, walk up the cpuset hierarchy |
8132ffc9 | 349 | * until we find one that does have some online cpus. |
1da177e4 LT |
350 | * |
351 | * One way or another, we guarantee to return some non-empty subset | |
5f054e31 | 352 | * of cpu_online_mask. |
1da177e4 | 353 | * |
8447a0fe | 354 | * Call with callback_lock or cpuset_mutex held. |
1da177e4 | 355 | */ |
c9710d80 | 356 | static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) |
1da177e4 | 357 | { |
8132ffc9 | 358 | while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { |
c431069f | 359 | cs = parent_cs(cs); |
8132ffc9 JP |
360 | if (unlikely(!cs)) { |
361 | /* | |
362 | * The top cpuset doesn't have any online cpu as a | |
363 | * consequence of a race between cpuset_hotplug_work | |
364 | * and cpu hotplug notifier. But we know the top | |
365 | * cpuset's effective_cpus is on its way to be |
366 | * identical to cpu_online_mask. | |
367 | */ | |
368 | cpumask_copy(pmask, cpu_online_mask); | |
369 | return; | |
370 | } | |
371 | } | |
ae1c8023 | 372 | cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); |
1da177e4 LT |
373 | } |
374 | ||
375 | /* | |
376 | * Return in *pmask the portion of a cpuset's mems_allowed that |
0e1e7c7a CL |
377 | * are online, with memory. If none are online with memory, walk |
378 | * up the cpuset hierarchy until we find one that does have some | |
40df2deb | 379 | * online mems. The top cpuset always has some mems online. |
1da177e4 LT |
380 | * |
381 | * One way or another, we guarantee to return some non-empty subset | |
38d7bee9 | 382 | * of node_states[N_MEMORY]. |
1da177e4 | 383 | * |
8447a0fe | 384 | * Call with callback_lock or cpuset_mutex held. |
1da177e4 | 385 | */ |
c9710d80 | 386 | static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) |
1da177e4 | 387 | { |
ae1c8023 | 388 | while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) |
c431069f | 389 | cs = parent_cs(cs); |
ae1c8023 | 390 | nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); |
1da177e4 LT |
391 | } |
392 | ||
f3b39d47 MX |
393 | /* |
394 | * update task's spread flag if cpuset's page/slab spread flag is set | |
395 | * | |
8447a0fe | 396 | * Call with callback_lock or cpuset_mutex held. |
f3b39d47 MX |
397 | */ |
398 | static void cpuset_update_task_spread_flag(struct cpuset *cs, | |
399 | struct task_struct *tsk) | |
400 | { | |
401 | if (is_spread_page(cs)) | |
2ad654bc | 402 | task_set_spread_page(tsk); |
f3b39d47 | 403 | else |
2ad654bc ZL |
404 | task_clear_spread_page(tsk); |
405 | ||
f3b39d47 | 406 | if (is_spread_slab(cs)) |
2ad654bc | 407 | task_set_spread_slab(tsk); |
f3b39d47 | 408 | else |
2ad654bc | 409 | task_clear_spread_slab(tsk); |
f3b39d47 MX |
410 | } |
411 | ||
1da177e4 LT |
412 | /* |
413 | * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? | |
414 | * | |
415 | * One cpuset is a subset of another if all its allowed CPUs and | |
416 | * Memory Nodes are a subset of the other, and its exclusive flags | |
5d21cc2d | 417 | * are only set if the other's are set. Call holding cpuset_mutex. |
1da177e4 LT |
418 | */ |
419 | ||
420 | static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) | |
421 | { | |
cd55f3c2 | 422 | return cpumask_subset(p->cpus_requested, q->cpus_requested) && |
1da177e4 LT |
423 | nodes_subset(p->mems_allowed, q->mems_allowed) && |
424 | is_cpu_exclusive(p) <= is_cpu_exclusive(q) && | |
425 | is_mem_exclusive(p) <= is_mem_exclusive(q); | |
426 | } | |
427 | ||
645fcc9d LZ |
428 | /** |
429 | * alloc_trial_cpuset - allocate a trial cpuset | |
430 | * @cs: the cpuset that the trial cpuset duplicates | |
431 | */ | |
c9710d80 | 432 | static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) |
645fcc9d | 433 | { |
300ed6cb LZ |
434 | struct cpuset *trial; |
435 | ||
436 | trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); | |
437 | if (!trial) | |
438 | return NULL; | |
439 | ||
e2b9a3d7 LZ |
440 | if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) |
441 | goto free_cs; | |
442 | if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL)) | |
443 | goto free_cpus; | |
300ed6cb | 444 | |
e2b9a3d7 LZ |
445 | cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); |
446 | cpumask_copy(trial->effective_cpus, cs->effective_cpus); | |
300ed6cb | 447 | return trial; |
e2b9a3d7 LZ |
448 | |
449 | free_cpus: | |
450 | free_cpumask_var(trial->cpus_allowed); | |
451 | free_cs: | |
452 | kfree(trial); | |
453 | return NULL; | |
645fcc9d LZ |
454 | } |
455 | ||
456 | /** | |
457 | * free_trial_cpuset - free the trial cpuset | |
458 | * @trial: the trial cpuset to be freed | |
459 | */ | |
460 | static void free_trial_cpuset(struct cpuset *trial) | |
461 | { | |
e2b9a3d7 | 462 | free_cpumask_var(trial->effective_cpus); |
300ed6cb | 463 | free_cpumask_var(trial->cpus_allowed); |
645fcc9d LZ |
464 | kfree(trial); |
465 | } | |
466 | ||
1da177e4 LT |
467 | /* |
468 | * validate_change() - Used to validate that any proposed cpuset change | |
469 | * follows the structural rules for cpusets. | |
470 | * | |
471 | * If we replaced the flag and mask values of the current cpuset | |
472 | * (cur) with those values in the trial cpuset (trial), would | |
473 | * our various subset and exclusive rules still be valid? Presumes | |
5d21cc2d | 474 | * cpuset_mutex held. |
1da177e4 LT |
475 | * |
476 | * 'cur' is the address of an actual, in-use cpuset. Operations | |
477 | * such as list traversal that depend on the actual address of the | |
478 | * cpuset in the list must use cur below, not trial. | |
479 | * | |
480 | * 'trial' is the address of bulk structure copy of cur, with | |
481 | * perhaps one or more of the fields cpus_allowed, mems_allowed, | |
482 | * or flags changed to new, trial values. | |
483 | * | |
484 | * Return 0 if valid, -errno if not. | |
485 | */ | |
486 | ||
c9710d80 | 487 | static int validate_change(struct cpuset *cur, struct cpuset *trial) |
1da177e4 | 488 | { |
492eb21b | 489 | struct cgroup_subsys_state *css; |
1da177e4 | 490 | struct cpuset *c, *par; |
ae8086ce TH |
491 | int ret; |
492 | ||
493 | rcu_read_lock(); | |
1da177e4 LT |
494 | |
495 | /* Each of our child cpusets must be a subset of us */ | |
ae8086ce | 496 | ret = -EBUSY; |
492eb21b | 497 | cpuset_for_each_child(c, css, cur) |
ae8086ce TH |
498 | if (!is_cpuset_subset(c, trial)) |
499 | goto out; | |
1da177e4 LT |
500 | |
501 | /* Remaining checks don't apply to root cpuset */ | |
ae8086ce | 502 | ret = 0; |
69604067 | 503 | if (cur == &top_cpuset) |
ae8086ce | 504 | goto out; |
1da177e4 | 505 | |
c431069f | 506 | par = parent_cs(cur); |
69604067 | 507 | |
7e88291b | 508 | /* On legacy hierarchy, we must be a subset of our parent cpuset. */ |
ae8086ce | 509 | ret = -EACCES; |
9e10a130 TH |
510 | if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
511 | !is_cpuset_subset(trial, par)) | |
ae8086ce | 512 | goto out; |
1da177e4 | 513 | |
2df167a3 PM |
514 | /* |
515 | * If either I or some sibling (!= me) is exclusive, we can't | |
516 | * overlap | |
517 | */ | |
ae8086ce | 518 | ret = -EINVAL; |
492eb21b | 519 | cpuset_for_each_child(c, css, par) { |
1da177e4 LT |
520 | if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && |
521 | c != cur && | |
cd55f3c2 | 522 | cpumask_intersects(trial->cpus_requested, c->cpus_requested)) |
ae8086ce | 523 | goto out; |
1da177e4 LT |
524 | if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && |
525 | c != cur && | |
526 | nodes_intersects(trial->mems_allowed, c->mems_allowed)) | |
ae8086ce | 527 | goto out; |
1da177e4 LT |
528 | } |
529 | ||
452477fa TH |
530 | /* |
531 | * Cpusets with tasks - existing or newly being attached - can't | |
1c09b195 | 532 | * be changed to have empty cpus_allowed or mems_allowed. |
452477fa | 533 | */ |
ae8086ce | 534 | ret = -ENOSPC; |
27bd4dbb | 535 | if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { |
1c09b195 LZ |
536 | if (!cpumask_empty(cur->cpus_allowed) && |
537 | cpumask_empty(trial->cpus_allowed)) | |
538 | goto out; | |
539 | if (!nodes_empty(cur->mems_allowed) && | |
540 | nodes_empty(trial->mems_allowed)) | |
541 | goto out; | |
542 | } | |
020958b6 | 543 | |
f82f8042 JL |
544 | /* |
545 | * We can't shrink if we won't have enough room for SCHED_DEADLINE | |
546 | * tasks. | |
547 | */ | |
548 | ret = -EBUSY; | |
549 | if (is_cpu_exclusive(cur) && | |
550 | !cpuset_cpumask_can_shrink(cur->cpus_allowed, | |
551 | trial->cpus_allowed)) | |
552 | goto out; | |
553 | ||
ae8086ce TH |
554 | ret = 0; |
555 | out: | |
556 | rcu_read_unlock(); | |
557 | return ret; | |
1da177e4 LT |
558 | } |
559 | ||
db7f47cf | 560 | #ifdef CONFIG_SMP |
029190c5 | 561 | /* |
cf417141 | 562 | * Helper routine for generate_sched_domains(). |
8b5f1c52 | 563 | * Do cpusets a, b have overlapping effective cpus_allowed masks? |
029190c5 | 564 | */ |
029190c5 PJ |
565 | static int cpusets_overlap(struct cpuset *a, struct cpuset *b) |
566 | { | |
8b5f1c52 | 567 | return cpumask_intersects(a->effective_cpus, b->effective_cpus); |
029190c5 PJ |
568 | } |
569 | ||
1d3504fc HS |
570 | static void |
571 | update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) | |
572 | { | |
1d3504fc HS |
573 | if (dattr->relax_domain_level < c->relax_domain_level) |
574 | dattr->relax_domain_level = c->relax_domain_level; | |
575 | return; | |
576 | } | |
577 | ||
fc560a26 TH |
578 | static void update_domain_attr_tree(struct sched_domain_attr *dattr, |
579 | struct cpuset *root_cs) | |
f5393693 | 580 | { |
fc560a26 | 581 | struct cpuset *cp; |
492eb21b | 582 | struct cgroup_subsys_state *pos_css; |
f5393693 | 583 | |
fc560a26 | 584 | rcu_read_lock(); |
492eb21b | 585 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { |
fc560a26 TH |
586 | /* skip the whole subtree if @cp doesn't have any CPU */ |
587 | if (cpumask_empty(cp->cpus_allowed)) { | |
492eb21b | 588 | pos_css = css_rightmost_descendant(pos_css); |
f5393693 | 589 | continue; |
fc560a26 | 590 | } |
f5393693 LJ |
591 | |
592 | if (is_sched_load_balance(cp)) | |
593 | update_domain_attr(dattr, cp); | |
f5393693 | 594 | } |
fc560a26 | 595 | rcu_read_unlock(); |
f5393693 LJ |
596 | } |
597 | ||
029190c5 | 598 | /* |
cf417141 MK |
599 | * generate_sched_domains() |
600 | * | |
601 | * This function builds a partial partition of the system's CPUs. |
602 | * A 'partial partition' is a set of non-overlapping subsets whose | |
603 | * union is a subset of that set. | |
0a0fca9d | 604 | * The output of this function needs to be passed to kernel/sched/core.c |
cf417141 MK |
605 | * partition_sched_domains() routine, which will rebuild the scheduler's |
606 | * load balancing domains (sched domains) as specified by that partial | |
607 | * partition. | |
029190c5 | 608 | * |
45ce80fb | 609 | * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt |
029190c5 PJ |
610 | * for a background explanation of this. |
611 | * | |
612 | * Does not return errors, on the theory that the callers of this | |
613 | * routine would rather not worry about failures to rebuild sched | |
614 | * domains when operating in the severe memory shortage situations | |
615 | * that could cause allocation failures below. | |
616 | * | |
5d21cc2d | 617 | * Must be called with cpuset_mutex held. |
029190c5 PJ |
618 | * |
619 | * The three key local variables below are: | |
aeed6824 | 620 | * cp - cpuset pointer, used (together with pos_css) to perform a |
029190c5 PJ |
621 | * top-down scan of all cpusets. This scan loads a pointer |
622 | * to each cpuset marked is_sched_load_balance into the | |
623 | * array 'csa'. For our purposes, rebuilding the schedulers | |
624 | * sched domains, we can ignore !is_sched_load_balance cpusets. | |
625 | * csa - (for CpuSet Array) Array of pointers to all the cpusets | |
626 | * that need to be load balanced, for convenient iterative | |
627 | * access by the subsequent code that finds the best partition, | |
628 | * i.e the set of domains (subsets) of CPUs such that the | |
629 | * cpus_allowed of every cpuset marked is_sched_load_balance | |
630 | * is a subset of one of these domains, while there are as | |
631 | * many such domains as possible, each as small as possible. | |
632 | * doms - Conversion of 'csa' to an array of cpumasks, for passing to | |
0a0fca9d | 633 | * the kernel/sched/core.c routine partition_sched_domains() in a |
029190c5 PJ |
634 | * convenient format, that can be easily compared to the prior |
635 | * value to determine what partition elements (sched domains) | |
636 | * were changed (added or removed.) | |
637 | * | |
638 | * Finding the best partition (set of domains): | |
639 | * The triple nested loops below over i, j, k scan over the | |
640 | * load balanced cpusets (using the array of cpuset pointers in | |
641 | * csa[]) looking for pairs of cpusets that have overlapping | |
642 | * cpus_allowed, but which don't have the same 'pn' partition | |
643 | * number, and merges them into the same partition. It keeps |
644 | * looping on the 'restart' label until it can no longer find | |
645 | * any such pairs. | |
646 | * | |
647 | * The union of the cpus_allowed masks from the set of | |
648 | * all cpusets having the same 'pn' value then forms the one |
649 | * element of the partition (one sched domain) to be passed to | |
650 | * partition_sched_domains(). | |
651 | */ | |
acc3f5d7 | 652 | static int generate_sched_domains(cpumask_var_t **domains, |
cf417141 | 653 | struct sched_domain_attr **attributes) |
029190c5 | 654 | { |
029190c5 PJ |
655 | struct cpuset *cp; /* scans q */ |
656 | struct cpuset **csa; /* array of all cpuset ptrs */ | |
657 | int csn; /* how many cpuset ptrs in csa so far */ | |
658 | int i, j, k; /* indices for partition finding loops */ | |
acc3f5d7 | 659 | cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ |
47b8ea71 | 660 | cpumask_var_t non_isolated_cpus; /* load balanced CPUs */ |
1d3504fc | 661 | struct sched_domain_attr *dattr; /* attributes for custom domains */ |
1583715d | 662 | int ndoms = 0; /* number of sched domains in result */ |
6af866af | 663 | int nslot; /* next empty doms[] struct cpumask slot */ |
492eb21b | 664 | struct cgroup_subsys_state *pos_css; |
029190c5 | 665 | |
029190c5 | 666 | doms = NULL; |
1d3504fc | 667 | dattr = NULL; |
cf417141 | 668 | csa = NULL; |
029190c5 | 669 | |
47b8ea71 RR |
670 | if (!alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL)) |
671 | goto done; | |
672 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); | |
673 | ||
029190c5 PJ |
674 | /* Special case for the 99% of systems with one, full, sched domain */ |
675 | if (is_sched_load_balance(&top_cpuset)) { | |
acc3f5d7 RR |
676 | ndoms = 1; |
677 | doms = alloc_sched_domains(ndoms); | |
029190c5 | 678 | if (!doms) |
cf417141 MK |
679 | goto done; |
680 | ||
1d3504fc HS |
681 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); |
682 | if (dattr) { | |
683 | *dattr = SD_ATTR_INIT; | |
93a65575 | 684 | update_domain_attr_tree(dattr, &top_cpuset); |
1d3504fc | 685 | } |
47b8ea71 RR |
686 | cpumask_and(doms[0], top_cpuset.effective_cpus, |
687 | non_isolated_cpus); | |
cf417141 | 688 | |
cf417141 | 689 | goto done; |
029190c5 PJ |
690 | } |
691 | ||
664eedde | 692 | csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL); |
029190c5 PJ |
693 | if (!csa) |
694 | goto done; | |
695 | csn = 0; | |
696 | ||
fc560a26 | 697 | rcu_read_lock(); |
492eb21b | 698 | cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { |
bd8815a6 TH |
699 | if (cp == &top_cpuset) |
700 | continue; | |
f5393693 | 701 | /* |
fc560a26 TH |
702 | * Continue traversing beyond @cp iff @cp has some CPUs and |
703 | * isn't load balancing. The former is obvious. The | |
704 | * latter: All child cpusets contain a subset of the | |
705 | * parent's cpus, so just skip them, and then we call | |
706 | * update_domain_attr_tree() to calc relax_domain_level of | |
707 | * the corresponding sched domain. | |
f5393693 | 708 | */ |
fc560a26 | 709 | if (!cpumask_empty(cp->cpus_allowed) && |
47b8ea71 RR |
710 | !(is_sched_load_balance(cp) && |
711 | cpumask_intersects(cp->cpus_allowed, non_isolated_cpus))) | |
f5393693 | 712 | continue; |
489a5393 | 713 | |
fc560a26 TH |
714 | if (is_sched_load_balance(cp)) |
715 | csa[csn++] = cp; | |
716 | ||
717 | /* skip @cp's subtree */ | |
492eb21b | 718 | pos_css = css_rightmost_descendant(pos_css); |
fc560a26 TH |
719 | } |
720 | rcu_read_unlock(); | |
029190c5 PJ |
721 | |
722 | for (i = 0; i < csn; i++) | |
723 | csa[i]->pn = i; | |
724 | ndoms = csn; | |
725 | ||
726 | restart: | |
727 | /* Find the best partition (set of sched domains) */ | |
728 | for (i = 0; i < csn; i++) { | |
729 | struct cpuset *a = csa[i]; | |
730 | int apn = a->pn; | |
731 | ||
732 | for (j = 0; j < csn; j++) { | |
733 | struct cpuset *b = csa[j]; | |
734 | int bpn = b->pn; | |
735 | ||
736 | if (apn != bpn && cpusets_overlap(a, b)) { | |
737 | for (k = 0; k < csn; k++) { | |
738 | struct cpuset *c = csa[k]; | |
739 | ||
740 | if (c->pn == bpn) | |
741 | c->pn = apn; | |
742 | } | |
743 | ndoms--; /* one less element */ | |
744 | goto restart; | |
745 | } | |
746 | } | |
747 | } | |
748 | ||
cf417141 MK |
749 | /* |
750 | * Now we know how many domains to create. | |
751 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. | |
752 | */ | |
acc3f5d7 | 753 | doms = alloc_sched_domains(ndoms); |
700018e0 | 754 | if (!doms) |
cf417141 | 755 | goto done; |
cf417141 MK |
756 | |
757 | /* | |
758 | * The rest of the code, including the scheduler, can deal with | |
759 | * dattr==NULL case. No need to abort if alloc fails. | |
760 | */ | |
1d3504fc | 761 | dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL); |
029190c5 PJ |
762 | |
763 | for (nslot = 0, i = 0; i < csn; i++) { | |
764 | struct cpuset *a = csa[i]; | |
6af866af | 765 | struct cpumask *dp; |
029190c5 PJ |
766 | int apn = a->pn; |
767 | ||
cf417141 MK |
768 | if (apn < 0) { |
769 | /* Skip completed partitions */ | |
770 | continue; | |
771 | } | |
772 | ||
acc3f5d7 | 773 | dp = doms[nslot]; |
cf417141 MK |
774 | |
775 | if (nslot == ndoms) { | |
776 | static int warnings = 10; | |
777 | if (warnings) { | |
12d3089c FF |
778 | pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n", |
779 | nslot, ndoms, csn, i, apn); | |
cf417141 | 780 | warnings--; |
029190c5 | 781 | } |
cf417141 MK |
782 | continue; |
783 | } | |
029190c5 | 784 | |
6af866af | 785 | cpumask_clear(dp); |
cf417141 MK |
786 | if (dattr) |
787 | *(dattr + nslot) = SD_ATTR_INIT; | |
788 | for (j = i; j < csn; j++) { | |
789 | struct cpuset *b = csa[j]; | |
790 | ||
791 | if (apn == b->pn) { | |
8b5f1c52 | 792 | cpumask_or(dp, dp, b->effective_cpus); |
47b8ea71 | 793 | cpumask_and(dp, dp, non_isolated_cpus); |
cf417141 MK |
794 | if (dattr) |
795 | update_domain_attr_tree(dattr + nslot, b); | |
796 | ||
797 | /* Done with this partition */ | |
798 | b->pn = -1; | |
029190c5 | 799 | } |
029190c5 | 800 | } |
cf417141 | 801 | nslot++; |
029190c5 PJ |
802 | } |
803 | BUG_ON(nslot != ndoms); | |
804 | ||
cf417141 | 805 | done: |
47b8ea71 | 806 | free_cpumask_var(non_isolated_cpus); |
cf417141 MK |
807 | kfree(csa); |
808 | ||
700018e0 LZ |
809 | /* |
810 | * Fallback to the default domain if kmalloc() failed. | |
811 | * See comments in partition_sched_domains(). | |
812 | */ | |
813 | if (doms == NULL) | |
814 | ndoms = 1; | |
815 | ||
cf417141 MK |
816 | *domains = doms; |
817 | *attributes = dattr; | |
818 | return ndoms; | |
819 | } | |
820 | ||
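The pn-merging loop in generate_sched_domains() above can be hard to follow inline. Here is a small self-contained userspace sketch of the same merging idea (editor's illustration; made-up data, plain unsigned longs standing in for cpumasks):

```c
/* Editor's sketch: overlapping cpusets are folded into one partition. */
#include <stdio.h>

int main(void)
{
	unsigned long mask[] = { 0x03, 0x06, 0x30, 0xc0 };	/* cpus of 4 cpusets */
	int pn[] = { 0, 1, 2, 3 };				/* start with one partition each */
	int csn = 4, ndoms = csn;
	int i, j, k;

restart:
	for (i = 0; i < csn; i++) {
		for (j = 0; j < csn; j++) {
			if (pn[i] != pn[j] && (mask[i] & mask[j])) {
				for (k = 0; k < csn; k++)
					if (pn[k] == pn[j])
						pn[k] = pn[i];	/* merge j's partition into i's */
				ndoms--;			/* one less sched domain */
				goto restart;
			}
		}
	}

	/* masks 0x03 and 0x06 overlap on CPU 1, so they merge: ndoms == 3 */
	printf("ndoms = %d\n", ndoms);
	return 0;
}
```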
821 | /* | |
822 | * Rebuild scheduler domains. | |
823 | * | |
699140ba TH |
824 | * If the flag 'sched_load_balance' of any cpuset with non-empty |
825 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset | |
826 | * which has that flag enabled, or if any cpuset with a non-empty | |
827 | * 'cpus' is removed, then call this routine to rebuild the | |
828 | * scheduler's dynamic sched domains. | |
cf417141 | 829 | * |
5d21cc2d | 830 | * Call with cpuset_mutex held. Takes get_online_cpus(). |
cf417141 | 831 | */ |
699140ba | 832 | static void rebuild_sched_domains_locked(void) |
cf417141 MK |
833 | { |
834 | struct sched_domain_attr *attr; | |
acc3f5d7 | 835 | cpumask_var_t *doms; |
cf417141 MK |
836 | int ndoms; |
837 | ||
5d21cc2d | 838 | lockdep_assert_held(&cpuset_mutex); |
86ef5c9a | 839 | get_online_cpus(); |
cf417141 | 840 | |
5b16c2a4 LZ |
841 | /* |
842 | * We have raced with CPU hotplug. Don't do anything to avoid | |
843 | * passing doms with offlined cpu to partition_sched_domains(). | |
844 | * Anyways, hotplug work item will rebuild sched domains. | |
845 | */ | |
8b5f1c52 | 846 | if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) |
5b16c2a4 LZ |
847 | goto out; |
848 | ||
cf417141 | 849 | /* Generate domain masks and attrs */ |
cf417141 | 850 | ndoms = generate_sched_domains(&doms, &attr); |
cf417141 MK |
851 | |
852 | /* Have scheduler rebuild the domains */ | |
853 | partition_sched_domains(ndoms, doms, attr); | |
5b16c2a4 | 854 | out: |
86ef5c9a | 855 | put_online_cpus(); |
cf417141 | 856 | } |
db7f47cf | 857 | #else /* !CONFIG_SMP */ |
699140ba | 858 | static void rebuild_sched_domains_locked(void) |
db7f47cf PM |
859 | { |
860 | } | |
db7f47cf | 861 | #endif /* CONFIG_SMP */ |
029190c5 | 862 | |
cf417141 MK |
863 | void rebuild_sched_domains(void) |
864 | { | |
5d21cc2d | 865 | mutex_lock(&cpuset_mutex); |
699140ba | 866 | rebuild_sched_domains_locked(); |
5d21cc2d | 867 | mutex_unlock(&cpuset_mutex); |
029190c5 PJ |
868 | } |
869 | ||
0b2f630a MX |
870 | /** |
871 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. | |
872 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed | |
0b2f630a | 873 | * |
d66393e5 TH |
874 | * Iterate through each task of @cs updating its cpus_allowed to the |
875 | * effective cpuset's. As this function is called with cpuset_mutex held, | |
876 | * cpuset membership stays stable. | |
0b2f630a | 877 | */ |
d66393e5 | 878 | static void update_tasks_cpumask(struct cpuset *cs) |
0b2f630a | 879 | { |
d66393e5 TH |
880 | struct css_task_iter it; |
881 | struct task_struct *task; | |
882 | ||
883 | css_task_iter_start(&cs->css, &it); | |
884 | while ((task = css_task_iter_next(&it))) | |
ae1c8023 | 885 | set_cpus_allowed_ptr(task, cs->effective_cpus); |
d66393e5 | 886 | css_task_iter_end(&it); |
0b2f630a MX |
887 | } |
888 | ||
5c5cc623 | 889 | /* |
734d4513 LZ |
890 | * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree |
891 | * @cs: the cpuset to consider | |
892 | * @new_cpus: temp variable for calculating new effective_cpus | |
893 | * | |
894 | * When the configured cpumask is changed, the effective cpumasks of this cpuset |
895 | * and all its descendants need to be updated. | |
5c5cc623 | 896 | * |
734d4513 | 897 | * On legacy hierarchy, effective_cpus will be the same as cpus_allowed. |
5c5cc623 LZ |
898 | * |
899 | * Called with cpuset_mutex held | |
900 | */ | |
734d4513 | 901 | static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) |
5c5cc623 LZ |
902 | { |
903 | struct cpuset *cp; | |
492eb21b | 904 | struct cgroup_subsys_state *pos_css; |
8b5f1c52 | 905 | bool need_rebuild_sched_domains = false; |
5c5cc623 LZ |
906 | |
907 | rcu_read_lock(); | |
734d4513 LZ |
908 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
909 | struct cpuset *parent = parent_cs(cp); | |
910 | ||
911 | cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus); | |
912 | ||
554b0d1c LZ |
913 | /* |
914 | * If it becomes empty, inherit the effective mask of the | |
915 | * parent, which is guaranteed to have some CPUs. | |
916 | */ | |
9e10a130 TH |
917 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
918 | cpumask_empty(new_cpus)) | |
554b0d1c LZ |
919 | cpumask_copy(new_cpus, parent->effective_cpus); |
920 | ||
734d4513 LZ |
921 | /* Skip the whole subtree if the cpumask remains the same. */ |
922 | if (cpumask_equal(new_cpus, cp->effective_cpus)) { | |
923 | pos_css = css_rightmost_descendant(pos_css); | |
924 | continue; | |
5c5cc623 | 925 | } |
734d4513 | 926 | |
ec903c0c | 927 | if (!css_tryget_online(&cp->css)) |
5c5cc623 LZ |
928 | continue; |
929 | rcu_read_unlock(); | |
930 | ||
8447a0fe | 931 | spin_lock_irq(&callback_lock); |
734d4513 | 932 | cpumask_copy(cp->effective_cpus, new_cpus); |
8447a0fe | 933 | spin_unlock_irq(&callback_lock); |
734d4513 | 934 | |
9e10a130 | 935 | WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
734d4513 LZ |
936 | !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); |
937 | ||
d66393e5 | 938 | update_tasks_cpumask(cp); |
5c5cc623 | 939 | |
8b5f1c52 LZ |
940 | /* |
941 | * If the effective cpumask of any non-empty cpuset is changed, | |
942 | * we need to rebuild sched domains. | |
943 | */ | |
944 | if (!cpumask_empty(cp->cpus_allowed) && | |
945 | is_sched_load_balance(cp)) | |
946 | need_rebuild_sched_domains = true; | |
947 | ||
5c5cc623 LZ |
948 | rcu_read_lock(); |
949 | css_put(&cp->css); | |
950 | } | |
951 | rcu_read_unlock(); | |
8b5f1c52 LZ |
952 | |
953 | if (need_rebuild_sched_domains) | |
954 | rebuild_sched_domains_locked(); | |
5c5cc623 LZ |
955 | } |
956 | ||
58f4790b CW |
957 | /** |
958 | * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it | |
959 | * @cs: the cpuset to consider | |
fc34ac1d | 960 | * @trialcs: trial cpuset |
58f4790b CW |
961 | * @buf: buffer of cpu numbers written to this cpuset |
962 | */ | |
645fcc9d LZ |
963 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, |
964 | const char *buf) | |
1da177e4 | 965 | { |
58f4790b | 966 | int retval; |
1da177e4 | 967 | |
5f054e31 | 968 | /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ |
4c4d50f7 PJ |
969 | if (cs == &top_cpuset) |
970 | return -EACCES; | |
971 | ||
6f7f02e7 | 972 | /* |
c8d9c90c | 973 | * An empty cpus_allowed is ok only if the cpuset has no tasks. |
020958b6 PJ |
974 | * Since cpulist_parse() fails on an empty mask, we special case |
975 | * that parsing. The validate_change() call ensures that cpusets | |
976 | * with tasks have cpus. | |
6f7f02e7 | 977 | */ |
020958b6 | 978 | if (!*buf) { |
300ed6cb | 979 | cpumask_clear(trialcs->cpus_allowed); |
6f7f02e7 | 980 | } else { |
cd55f3c2 | 981 | retval = cpulist_parse(buf, trialcs->cpus_requested); |
6f7f02e7 DR |
982 | if (retval < 0) |
983 | return retval; | |
37340746 | 984 | |
cd55f3c2 | 985 | if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask)) |
37340746 | 986 | return -EINVAL; |
cd55f3c2 RA |
987 | |
988 | cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask); | |
6f7f02e7 | 989 | } |
029190c5 | 990 | |
8707d8b8 | 991 | /* Nothing to do if the cpus didn't change */ |
cd55f3c2 | 992 | if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested)) |
8707d8b8 | 993 | return 0; |
58f4790b | 994 | |
a73456f3 LZ |
995 | retval = validate_change(cs, trialcs); |
996 | if (retval < 0) | |
997 | return retval; | |
998 | ||
8447a0fe | 999 | spin_lock_irq(&callback_lock); |
300ed6cb | 1000 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); |
cd55f3c2 | 1001 | cpumask_copy(cs->cpus_requested, trialcs->cpus_requested); |
8447a0fe | 1002 | spin_unlock_irq(&callback_lock); |
029190c5 | 1003 | |
734d4513 LZ |
1004 | /* use trialcs->cpus_allowed as a temp variable */ |
1005 | update_cpumasks_hier(cs, trialcs->cpus_allowed); | |
85d7b949 | 1006 | return 0; |
1da177e4 LT |
1007 | } |
1008 | ||
e4e364e8 | 1009 | /* |
fff4dc84 TH |
1010 | * Migrate memory region from one set of nodes to another. This is |
1011 | * performed asynchronously as it can be called from the process migration path |
1012 | * holding locks involved in process management. All mm migrations are | |
1013 | * performed in the queued order and can be waited for by flushing | |
1014 | * cpuset_migrate_mm_wq. | |
e4e364e8 PJ |
1015 | */ |
1016 | ||
fff4dc84 TH |
1017 | struct cpuset_migrate_mm_work { |
1018 | struct work_struct work; | |
1019 | struct mm_struct *mm; | |
1020 | nodemask_t from; | |
1021 | nodemask_t to; | |
1022 | }; | |
1023 | ||
1024 | static void cpuset_migrate_mm_workfn(struct work_struct *work) | |
1025 | { | |
1026 | struct cpuset_migrate_mm_work *mwork = | |
1027 | container_of(work, struct cpuset_migrate_mm_work, work); | |
1028 | ||
1029 | /* on a wq worker, no need to worry about %current's mems_allowed */ | |
1030 | do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); | |
1031 | mmput(mwork->mm); | |
1032 | kfree(mwork); | |
1033 | } | |
1034 | ||
e4e364e8 PJ |
1035 | static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, |
1036 | const nodemask_t *to) | |
1037 | { | |
fff4dc84 | 1038 | struct cpuset_migrate_mm_work *mwork; |
e4e364e8 | 1039 | |
fff4dc84 TH |
1040 | mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); |
1041 | if (mwork) { | |
1042 | mwork->mm = mm; | |
1043 | mwork->from = *from; | |
1044 | mwork->to = *to; | |
1045 | INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); | |
1046 | queue_work(cpuset_migrate_mm_wq, &mwork->work); | |
1047 | } else { | |
1048 | mmput(mm); | |
1049 | } | |
1050 | } | |
e4e364e8 | 1051 | |
d5209747 | 1052 | static void cpuset_post_attach(void) |
fff4dc84 TH |
1053 | { |
1054 | flush_workqueue(cpuset_migrate_mm_wq); | |
e4e364e8 PJ |
1055 | } |
1056 | ||
3b6766fe | 1057 | /* |
58568d2a MX |
1058 | * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy |
1059 | * @tsk: the task to change | |
1060 | * @newmems: new nodes that the task will be set | |
1061 | * | |
1062 | * In order to avoid seeing no nodes if the old and new nodes are disjoint, | |
1063 | * we structure updates as setting all new allowed nodes, then clearing newly | |
1064 | * disallowed ones. | |
58568d2a MX |
1065 | */ |
1066 | static void cpuset_change_task_nodemask(struct task_struct *tsk, | |
1067 | nodemask_t *newmems) | |
1068 | { | |
b246272e | 1069 | bool need_loop; |
89e8a244 | 1070 | |
c0ff7453 MX |
1071 | /* |
1072 | * Allow tasks that have access to memory reserves because they have | |
1073 | * been OOM killed to get memory anywhere. | |
1074 | */ | |
1075 | if (unlikely(test_thread_flag(TIF_MEMDIE))) | |
1076 | return; | |
1077 | if (current->flags & PF_EXITING) /* Let dying task have memory */ | |
1078 | return; | |
1079 | ||
1080 | task_lock(tsk); | |
b246272e DR |
1081 | /* |
1082 | * Determine if a loop is necessary if another thread is doing | |
d26914d1 | 1083 | * read_mems_allowed_begin(). If at least one node remains unchanged and |
b246272e DR |
1084 | * tsk does not have a mempolicy, then an empty nodemask will not be |
1085 | * possible when mems_allowed is larger than a word. | |
1086 | */ | |
1087 | need_loop = task_has_mempolicy(tsk) || | |
1088 | !nodes_intersects(*newmems, tsk->mems_allowed); | |
c0ff7453 | 1089 | |
0fc0287c PZ |
1090 | if (need_loop) { |
1091 | local_irq_disable(); | |
cc9a6c87 | 1092 | write_seqcount_begin(&tsk->mems_allowed_seq); |
0fc0287c | 1093 | } |
c0ff7453 | 1094 | |
cc9a6c87 MG |
1095 | nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); |
1096 | mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1); | |
c0ff7453 MX |
1097 | |
1098 | mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2); | |
58568d2a | 1099 | tsk->mems_allowed = *newmems; |
cc9a6c87 | 1100 | |
0fc0287c | 1101 | if (need_loop) { |
cc9a6c87 | 1102 | write_seqcount_end(&tsk->mems_allowed_seq); |
0fc0287c PZ |
1103 | local_irq_enable(); |
1104 | } | |
cc9a6c87 | 1105 | |
c0ff7453 | 1106 | task_unlock(tsk); |
58568d2a MX |
1107 | } |
1108 | ||
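The seqcount written above pairs with the reader-side helpers read_mems_allowed_begin()/read_mems_allowed_retry() referred to in the comment; a minimal sketch of the expected reader pattern (editor's illustration, simplified from how allocator-side callers use it):

```c
/* Editor's sketch of the read side of mems_allowed_seq; not kernel code. */
static nodemask_t sample_mems_allowed(void)
{
	nodemask_t mems;
	unsigned int seq;

	do {
		seq = read_mems_allowed_begin();
		mems = current->mems_allowed;
	} while (read_mems_allowed_retry(seq));	/* retry if a rebind raced with us */

	return mems;
}
```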
8793d854 PM |
1109 | static void *cpuset_being_rebound; |
1110 | ||
0b2f630a MX |
1111 | /** |
1112 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. | |
1113 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed | |
0b2f630a | 1114 | * |
d66393e5 TH |
1115 | * Iterate through each task of @cs updating its mems_allowed to the |
1116 | * effective cpuset's. As this function is called with cpuset_mutex held, | |
1117 | * cpuset membership stays stable. | |
0b2f630a | 1118 | */ |
d66393e5 | 1119 | static void update_tasks_nodemask(struct cpuset *cs) |
1da177e4 | 1120 | { |
33ad801d | 1121 | static nodemask_t newmems; /* protected by cpuset_mutex */ |
d66393e5 TH |
1122 | struct css_task_iter it; |
1123 | struct task_struct *task; | |
59dac16f | 1124 | |
846a16bf | 1125 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
4225399a | 1126 | |
ae1c8023 | 1127 | guarantee_online_mems(cs, &newmems); |
33ad801d | 1128 | |
4225399a | 1129 | /* |
3b6766fe LZ |
1130 | * The mpol_rebind_mm() call takes mmap_sem, which we couldn't |
1131 | * take while holding tasklist_lock. Forks can happen - the | |
1132 | * mpol_dup() cpuset_being_rebound check will catch such forks, | |
1133 | * and rebind their vma mempolicies too. Because we still hold | |
5d21cc2d | 1134 | * the global cpuset_mutex, we know that no other rebind effort |
3b6766fe | 1135 | * will be contending for the global variable cpuset_being_rebound. |
4225399a | 1136 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() |
04c19fa6 | 1137 | * is idempotent. Also migrate pages in each mm to new nodes. |
4225399a | 1138 | */ |
d66393e5 TH |
1139 | css_task_iter_start(&cs->css, &it); |
1140 | while ((task = css_task_iter_next(&it))) { | |
1141 | struct mm_struct *mm; | |
1142 | bool migrate; | |
1143 | ||
1144 | cpuset_change_task_nodemask(task, &newmems); | |
1145 | ||
1146 | mm = get_task_mm(task); | |
1147 | if (!mm) | |
1148 | continue; | |
1149 | ||
1150 | migrate = is_memory_migrate(cs); | |
1151 | ||
1152 | mpol_rebind_mm(mm, &cs->mems_allowed); | |
1153 | if (migrate) | |
1154 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); | |
fff4dc84 TH |
1155 | else |
1156 | mmput(mm); | |
d66393e5 TH |
1157 | } |
1158 | css_task_iter_end(&it); | |
4225399a | 1159 | |
33ad801d LZ |
1160 | /* |
1161 | * All the tasks' nodemasks have been updated, update | |
1162 | * cs->old_mems_allowed. | |
1163 | */ | |
1164 | cs->old_mems_allowed = newmems; | |
1165 | ||
2df167a3 | 1166 | /* We're done rebinding vmas to this cpuset's new mems_allowed. */ |
8793d854 | 1167 | cpuset_being_rebound = NULL; |
1da177e4 LT |
1168 | } |
1169 | ||
5c5cc623 | 1170 | /* |
734d4513 LZ |
1171 | * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree |
1172 | * @cs: the cpuset to consider | |
1173 | * @new_mems: a temp variable for calculating new effective_mems | |
5c5cc623 | 1174 | * |
734d4513 LZ |
1175 | * When the configured nodemask is changed, the effective nodemasks of this cpuset |
1176 | * and all its descendants need to be updated. | |
5c5cc623 | 1177 | * |
734d4513 | 1178 | * On legacy hierarchy, effective_mems will be the same as mems_allowed. |
5c5cc623 LZ |
1179 | * |
1180 | * Called with cpuset_mutex held | |
1181 | */ | |
734d4513 | 1182 | static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) |
5c5cc623 LZ |
1183 | { |
1184 | struct cpuset *cp; | |
492eb21b | 1185 | struct cgroup_subsys_state *pos_css; |
5c5cc623 LZ |
1186 | |
1187 | rcu_read_lock(); | |
734d4513 LZ |
1188 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
1189 | struct cpuset *parent = parent_cs(cp); | |
1190 | ||
1191 | nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); | |
1192 | ||
554b0d1c LZ |
1193 | /* |
1194 | * If it becomes empty, inherit the effective mask of the | |
1195 | * parent, which is guaranteed to have some MEMs. | |
1196 | */ | |
9e10a130 TH |
1197 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
1198 | nodes_empty(*new_mems)) | |
554b0d1c LZ |
1199 | *new_mems = parent->effective_mems; |
1200 | ||
734d4513 LZ |
1201 | /* Skip the whole subtree if the nodemask remains the same. */ |
1202 | if (nodes_equal(*new_mems, cp->effective_mems)) { | |
1203 | pos_css = css_rightmost_descendant(pos_css); | |
1204 | continue; | |
5c5cc623 | 1205 | } |
734d4513 | 1206 | |
ec903c0c | 1207 | if (!css_tryget_online(&cp->css)) |
5c5cc623 LZ |
1208 | continue; |
1209 | rcu_read_unlock(); | |
1210 | ||
8447a0fe | 1211 | spin_lock_irq(&callback_lock); |
734d4513 | 1212 | cp->effective_mems = *new_mems; |
8447a0fe | 1213 | spin_unlock_irq(&callback_lock); |
734d4513 | 1214 | |
9e10a130 | 1215 | WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
a1381268 | 1216 | !nodes_equal(cp->mems_allowed, cp->effective_mems)); |
734d4513 | 1217 | |
d66393e5 | 1218 | update_tasks_nodemask(cp); |
5c5cc623 LZ |
1219 | |
1220 | rcu_read_lock(); | |
1221 | css_put(&cp->css); | |
1222 | } | |
1223 | rcu_read_unlock(); | |
1224 | } | |
1225 | ||
0b2f630a MX |
1226 | /* |
1227 | * Handle user request to change the 'mems' memory placement | |
1228 | * of a cpuset. Needs to validate the request, update the | |
58568d2a MX |
1229 | * cpuset's mems_allowed, and for each task in the cpuset, |
1230 | * update mems_allowed and rebind task's mempolicy and any vma |
1231 | * mempolicies and, if the cpuset is marked 'memory_migrate', |
1232 | * migrate the tasks' pages to the new memory. |
0b2f630a | 1233 | * |
8447a0fe | 1234 | * Call with cpuset_mutex held. May take callback_lock during call. |
0b2f630a MX |
1235 | * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, |
1236 | * lock each such task's mm->mmap_sem, scan its vmas and rebind |
1237 | * their mempolicies to the cpuset's new mems_allowed. |
1238 | */ | |
645fcc9d LZ |
1239 | static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, |
1240 | const char *buf) | |
0b2f630a | 1241 | { |
0b2f630a MX |
1242 | int retval; |
1243 | ||
1244 | /* | |
38d7bee9 | 1245 | * top_cpuset.mems_allowed tracks node_states[N_MEMORY]; |
0b2f630a MX |
1246 | * it's read-only |
1247 | */ | |
53feb297 MX |
1248 | if (cs == &top_cpuset) { |
1249 | retval = -EACCES; | |
1250 | goto done; | |
1251 | } | |
0b2f630a | 1252 | |
0b2f630a MX |
1253 | /* |
1254 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. | |
1255 | * Since nodelist_parse() fails on an empty mask, we special case | |
1256 | * that parsing. The validate_change() call ensures that cpusets | |
1257 | * with tasks have memory. | |
1258 | */ | |
1259 | if (!*buf) { | |
645fcc9d | 1260 | nodes_clear(trialcs->mems_allowed); |
0b2f630a | 1261 | } else { |
645fcc9d | 1262 | retval = nodelist_parse(buf, trialcs->mems_allowed); |
0b2f630a MX |
1263 | if (retval < 0) |
1264 | goto done; | |
1265 | ||
645fcc9d | 1266 | if (!nodes_subset(trialcs->mems_allowed, |
5d8ba82c LZ |
1267 | top_cpuset.mems_allowed)) { |
1268 | retval = -EINVAL; | |
53feb297 MX |
1269 | goto done; |
1270 | } | |
0b2f630a | 1271 | } |
33ad801d LZ |
1272 | |
1273 | if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { | |
0b2f630a MX |
1274 | retval = 0; /* Too easy - nothing to do */ |
1275 | goto done; | |
1276 | } | |
645fcc9d | 1277 | retval = validate_change(cs, trialcs); |
0b2f630a MX |
1278 | if (retval < 0) |
1279 | goto done; | |
1280 | ||
8447a0fe | 1281 | spin_lock_irq(&callback_lock); |
645fcc9d | 1282 | cs->mems_allowed = trialcs->mems_allowed; |
8447a0fe | 1283 | spin_unlock_irq(&callback_lock); |
0b2f630a | 1284 | |
734d4513 | 1285 | /* use trialcs->mems_allowed as a temp variable */ |
24ee3cf8 | 1286 | update_nodemasks_hier(cs, &trialcs->mems_allowed); |
0b2f630a MX |
1287 | done: |
1288 | return retval; | |
1289 | } | |
1290 | ||
8793d854 PM |
1291 | int current_cpuset_is_being_rebound(void) |
1292 | { | |
391acf97 GZ |
1293 | int ret; |
1294 | ||
1295 | rcu_read_lock(); | |
1296 | ret = task_cs(current) == cpuset_being_rebound; | |
1297 | rcu_read_unlock(); | |
1298 | ||
1299 | return ret; | |
8793d854 PM |
1300 | } |
1301 | ||
5be7a479 | 1302 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
1d3504fc | 1303 | { |
db7f47cf | 1304 | #ifdef CONFIG_SMP |
60495e77 | 1305 | if (val < -1 || val >= sched_domain_level_max) |
30e0e178 | 1306 | return -EINVAL; |
db7f47cf | 1307 | #endif |
1d3504fc HS |
1308 | |
1309 | if (val != cs->relax_domain_level) { | |
1310 | cs->relax_domain_level = val; | |
300ed6cb LZ |
1311 | if (!cpumask_empty(cs->cpus_allowed) && |
1312 | is_sched_load_balance(cs)) | |
699140ba | 1313 | rebuild_sched_domains_locked(); |
1d3504fc HS |
1314 | } |
1315 | ||
1316 | return 0; | |
1317 | } | |
1318 | ||
72ec7029 | 1319 | /** |
950592f7 MX |
1320 | * update_tasks_flags - update the spread flags of tasks in the cpuset. |
1321 | * @cs: the cpuset in which each task's spread flags needs to be changed | |
950592f7 | 1322 | * |
d66393e5 TH |
1323 | * Iterate through each task of @cs updating its spread flags. As this |
1324 | * function is called with cpuset_mutex held, cpuset membership stays | |
1325 | * stable. | |
950592f7 | 1326 | */ |
d66393e5 | 1327 | static void update_tasks_flags(struct cpuset *cs) |
950592f7 | 1328 | { |
d66393e5 TH |
1329 | struct css_task_iter it; |
1330 | struct task_struct *task; | |
1331 | ||
1332 | css_task_iter_start(&cs->css, &it); | |
1333 | while ((task = css_task_iter_next(&it))) | |
1334 | cpuset_update_task_spread_flag(cs, task); | |
1335 | css_task_iter_end(&it); | |
950592f7 MX |
1336 | } |
1337 | ||
1da177e4 LT |
1338 | /* |
1339 | * update_flag - read a 0 or a 1 in a file and update associated flag | |
78608366 PM |
1340 | * bit: the bit to update (see cpuset_flagbits_t) |
1341 | * cs: the cpuset to update | |
1342 | * turning_on: whether the flag is being set or cleared | |
053199ed | 1343 | * |
5d21cc2d | 1344 | * Call with cpuset_mutex held. |
1da177e4 LT |
1345 | */ |
1346 | ||
700fe1ab PM |
1347 | static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, |
1348 | int turning_on) | |
1da177e4 | 1349 | { |
645fcc9d | 1350 | struct cpuset *trialcs; |
40b6a762 | 1351 | int balance_flag_changed; |
950592f7 | 1352 | int spread_flag_changed; |
950592f7 | 1353 | int err; |
1da177e4 | 1354 | |
645fcc9d LZ |
1355 | trialcs = alloc_trial_cpuset(cs); |
1356 | if (!trialcs) | |
1357 | return -ENOMEM; | |
1358 | ||
1da177e4 | 1359 | if (turning_on) |
645fcc9d | 1360 | set_bit(bit, &trialcs->flags); |
1da177e4 | 1361 | else |
645fcc9d | 1362 | clear_bit(bit, &trialcs->flags); |
1da177e4 | 1363 | |
645fcc9d | 1364 | err = validate_change(cs, trialcs); |
85d7b949 | 1365 | if (err < 0) |
645fcc9d | 1366 | goto out; |
029190c5 | 1367 | |
029190c5 | 1368 | balance_flag_changed = (is_sched_load_balance(cs) != |
645fcc9d | 1369 | is_sched_load_balance(trialcs)); |
029190c5 | 1370 | |
950592f7 MX |
1371 | spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) |
1372 | || (is_spread_page(cs) != is_spread_page(trialcs))); | |
1373 | ||
8447a0fe | 1374 | spin_lock_irq(&callback_lock); |
645fcc9d | 1375 | cs->flags = trialcs->flags; |
8447a0fe | 1376 | spin_unlock_irq(&callback_lock); |
85d7b949 | 1377 | |
300ed6cb | 1378 | if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) |
699140ba | 1379 | rebuild_sched_domains_locked(); |
029190c5 | 1380 | |
950592f7 | 1381 | if (spread_flag_changed) |
d66393e5 | 1382 | update_tasks_flags(cs); |
645fcc9d LZ |
1383 | out: |
1384 | free_trial_cpuset(trialcs); | |
1385 | return err; | |
1da177e4 LT |
1386 | } |
1387 | ||
3e0d98b9 | 1388 | /* |
80f7228b | 1389 | * Frequency meter - How fast is some event occurring? |
3e0d98b9 PJ |
1390 | * |
1391 | * These routines manage a digitally filtered, constant time based, | |
1392 | * event frequency meter. There are four routines: | |
1393 | * fmeter_init() - initialize a frequency meter. | |
1394 | * fmeter_markevent() - called each time the event happens. | |
1395 | * fmeter_getrate() - returns the recent rate of such events. | |
1396 | * fmeter_update() - internal routine used to update fmeter. | |
1397 | * | |
1398 | * A common data structure is passed to each of these routines, | |
1399 | * which is used to keep track of the state required to manage the | |
1400 | * frequency meter and its digital filter. | |
1401 | * | |
1402 | * The filter works on the number of events marked per unit time. | |
1403 | * The filter is single-pole low-pass recursive (IIR). The time unit | |
1404 | * is 1 second. Arithmetic is done using 32-bit integers scaled to | |
1405 | * simulate 3 decimal digits of precision (multiplied by 1000). | |
1406 | * | |
1407 | * With an FM_COEF of 933, and a time base of 1 second, the filter | |
1408 | * has a half-life of 10 seconds, meaning that if the events quit | |
1409 | * happening, then the rate returned from the fmeter_getrate() | |
1410 | * will be cut in half each 10 seconds, until it converges to zero. | |
1411 | * | |
1412 | * It is not worth doing a real infinitely recursive filter. If more | |
1413 | * than FM_MAXTICKS ticks have elapsed since the last filter event, | |
1414 | * just compute FM_MAXTICKS ticks worth, by which point the level | |
1415 | * will be stable. | |
1416 | * | |
1417 | * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid | |
1418 | * arithmetic overflow in the fmeter_update() routine. | |
1419 | * | |
1420 | * Given the simple 32 bit integer arithmetic used, this meter works | |
1421 | * best for reporting rates between one per millisecond (msec) and | |
1422 | * one per 32 (approx) seconds. At constant rates faster than one | |
1423 | * per msec it maxes out at values just under 1,000,000. At constant | |
1424 | * rates between one per msec, and one per second it will stabilize | |
1425 | * to a value N*1000, where N is the rate of events per second. | |
1426 | * At constant rates between one per second and one per 32 seconds, | |
1427 | * it will be choppy, moving up on the seconds that have an event, | |
1428 | * and then decaying until the next event. At rates slower than | |
1429 | * about one in 32 seconds, it decays all the way back to zero between | |
1430 | * each event. | |
1431 | */ | |
1432 | ||
1433 | #define FM_COEF 933 /* coefficient for half-life of 10 secs */ | |
1434 | #define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */ | |
1435 | #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ | |
1436 | #define FM_SCALE 1000 /* faux fixed point scale */ | |
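As a quick sanity check on the 10-second half-life claimed above, the following user-space sketch (an editor's illustration, not part of this file) applies the same coefficient-and-scale decay step ten times to a pegged meter value; with truncating integer arithmetic the result lands just under half of the starting level.

#include <stdio.h>

/* Stand-alone model of the per-tick decay done by fmeter_update() below. */
#define COEF	933			/* mirrors FM_COEF */
#define SCALE	1000			/* mirrors FM_SCALE */

int main(void)
{
	int val = 1000000;		/* meter pegged at its maximum */
	int tick;

	/* one decay step per elapsed second, ten seconds of silence */
	for (tick = 0; tick < 10; tick++)
		val = (COEF * val) / SCALE;

	/* 0.933^10 is roughly 0.5, so this prints a value near 500000 */
	printf("level after 10 idle seconds: %d\n", val);
	return 0;
}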
1437 | ||
1438 | /* Initialize a frequency meter */ | |
1439 | static void fmeter_init(struct fmeter *fmp) | |
1440 | { | |
1441 | fmp->cnt = 0; | |
1442 | fmp->val = 0; | |
1443 | fmp->time = 0; | |
1444 | spin_lock_init(&fmp->lock); | |
1445 | } | |
1446 | ||
1447 | /* Internal meter update - process cnt events and update value */ | |
1448 | static void fmeter_update(struct fmeter *fmp) | |
1449 | { | |
1450 | time_t now = get_seconds(); | |
1451 | time_t ticks = now - fmp->time; | |
1452 | ||
1453 | if (ticks == 0) | |
1454 | return; | |
1455 | ||
1456 | ticks = min(FM_MAXTICKS, ticks); | |
1457 | while (ticks-- > 0) | |
1458 | fmp->val = (FM_COEF * fmp->val) / FM_SCALE; | |
1459 | fmp->time = now; | |
1460 | ||
1461 | fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; | |
1462 | fmp->cnt = 0; | |
1463 | } | |
1464 | ||
1465 | /* Process any previous ticks, then bump cnt by one (times scale). */ | |
1466 | static void fmeter_markevent(struct fmeter *fmp) | |
1467 | { | |
1468 | spin_lock(&fmp->lock); | |
1469 | fmeter_update(fmp); | |
1470 | fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); | |
1471 | spin_unlock(&fmp->lock); | |
1472 | } | |
1473 | ||
1474 | /* Process any previous ticks, then return current value. */ | |
1475 | static int fmeter_getrate(struct fmeter *fmp) | |
1476 | { | |
1477 | int val; | |
1478 | ||
1479 | spin_lock(&fmp->lock); | |
1480 | fmeter_update(fmp); | |
1481 | val = fmp->val; | |
1482 | spin_unlock(&fmp->lock); | |
1483 | return val; | |
1484 | } | |
1485 | ||
57fce0a6 TH |
1486 | static struct cpuset *cpuset_attach_old_cs; |
1487 | ||
5d21cc2d | 1488 | /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ |
1f7dd3e5 | 1489 | static int cpuset_can_attach(struct cgroup_taskset *tset) |
f780bdb7 | 1490 | { |
1f7dd3e5 TH |
1491 | struct cgroup_subsys_state *css; |
1492 | struct cpuset *cs; | |
bb9d97b6 TH |
1493 | struct task_struct *task; |
1494 | int ret; | |
1da177e4 | 1495 | |
57fce0a6 | 1496 | /* used later by cpuset_attach() */ |
1f7dd3e5 TH |
1497 | cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); |
1498 | cs = css_cs(css); | |
57fce0a6 | 1499 | |
5d21cc2d TH |
1500 | mutex_lock(&cpuset_mutex); |
1501 | ||
aa6ec29b | 1502 | /* allow moving tasks into an empty cpuset if on default hierarchy */ |
5d21cc2d | 1503 | ret = -ENOSPC; |
9e10a130 | 1504 | if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && |
88fa523b | 1505 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) |
5d21cc2d | 1506 | goto out_unlock; |
9985b0ba | 1507 | |
1f7dd3e5 | 1508 | cgroup_taskset_for_each(task, css, tset) { |
7f51412a JL |
1509 | ret = task_can_attach(task, cs->cpus_allowed); |
1510 | if (ret) | |
5d21cc2d TH |
1511 | goto out_unlock; |
1512 | ret = security_task_setscheduler(task); | |
1513 | if (ret) | |
1514 | goto out_unlock; | |
bb9d97b6 | 1515 | } |
f780bdb7 | 1516 | |
452477fa TH |
1517 | /* |
1518 | * Mark attach is in progress. This makes validate_change() fail | |
1519 | * changes which zero cpus/mems_allowed. | |
1520 | */ | |
1521 | cs->attach_in_progress++; | |
5d21cc2d TH |
1522 | ret = 0; |
1523 | out_unlock: | |
1524 | mutex_unlock(&cpuset_mutex); | |
1525 | return ret; | |
8793d854 | 1526 | } |
f780bdb7 | 1527 | |
1f7dd3e5 | 1528 | static void cpuset_cancel_attach(struct cgroup_taskset *tset) |
452477fa | 1529 | { |
1f7dd3e5 TH |
1530 | struct cgroup_subsys_state *css; |
1531 | struct cpuset *cs; | |
1532 | ||
1533 | cgroup_taskset_first(tset, &css); | |
1534 | cs = css_cs(css); | |
1535 | ||
5d21cc2d | 1536 | mutex_lock(&cpuset_mutex); |
eb95419b | 1537 | css_cs(css)->attach_in_progress--; |
5d21cc2d | 1538 | mutex_unlock(&cpuset_mutex); |
8793d854 | 1539 | } |
1da177e4 | 1540 | |
4e4c9a14 | 1541 | /* |
5d21cc2d | 1542 | * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach() |
4e4c9a14 TH |
1543 | * but we can't allocate it dynamically there. Define it globally and |
1544 | * allocate from cpuset_init(). | |
1545 | */ | |
1546 | static cpumask_var_t cpus_attach; | |
1547 | ||
1f7dd3e5 | 1548 | static void cpuset_attach(struct cgroup_taskset *tset) |
8793d854 | 1549 | { |
67bd2c59 | 1550 | /* static buf protected by cpuset_mutex */ |
4e4c9a14 | 1551 | static nodemask_t cpuset_attach_nodemask_to; |
bb9d97b6 | 1552 | struct task_struct *task; |
4530eddb | 1553 | struct task_struct *leader; |
1f7dd3e5 TH |
1554 | struct cgroup_subsys_state *css; |
1555 | struct cpuset *cs; | |
57fce0a6 | 1556 | struct cpuset *oldcs = cpuset_attach_old_cs; |
22fb52dd | 1557 | |
1f7dd3e5 TH |
1558 | cgroup_taskset_first(tset, &css); |
1559 | cs = css_cs(css); | |
1560 | ||
5d21cc2d TH |
1561 | mutex_lock(&cpuset_mutex); |
1562 | ||
4e4c9a14 TH |
1563 | /* prepare for attach */ |
1564 | if (cs == &top_cpuset) | |
1565 | cpumask_copy(cpus_attach, cpu_possible_mask); | |
1566 | else | |
ae1c8023 | 1567 | guarantee_online_cpus(cs, cpus_attach); |
4e4c9a14 | 1568 | |
ae1c8023 | 1569 | guarantee_online_mems(cs, &cpuset_attach_nodemask_to); |
4e4c9a14 | 1570 | |
1f7dd3e5 | 1571 | cgroup_taskset_for_each(task, css, tset) { |
bb9d97b6 TH |
1572 | /* |
1573 | * can_attach beforehand should guarantee that this doesn't | |
1574 | * fail. TODO: have a better way to handle failure here | |
1575 | */ | |
1576 | WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); | |
1577 | ||
1578 | cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); | |
1579 | cpuset_update_task_spread_flag(cs, task); | |
1580 | } | |
22fb52dd | 1581 | |
f780bdb7 | 1582 | /* |
4530eddb TH |
1583 | * Change mm for all threadgroup leaders. This is expensive and may |
1584 | * sleep and should be moved outside migration path proper. | |
f780bdb7 | 1585 | */ |
ae1c8023 | 1586 | cpuset_attach_nodemask_to = cs->effective_mems; |
1f7dd3e5 | 1587 | cgroup_taskset_for_each_leader(leader, css, tset) { |
3df9ca0a TH |
1588 | struct mm_struct *mm = get_task_mm(leader); |
1589 | ||
1590 | if (mm) { | |
1591 | mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); | |
1592 | ||
1593 | /* | |
1594 | * old_mems_allowed is the same as mems_allowed |
1595 | * here, except if this task is being moved | |
1596 | * automatically due to hotplug. In that case | |
1597 | * @mems_allowed has been updated and is empty, so | |
1598 | * @old_mems_allowed is the right nodemask that we |
1599 | * migrate the mm from. |
1600 | */ | |
fff4dc84 | 1601 | if (is_memory_migrate(cs)) |
3df9ca0a TH |
1602 | cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, |
1603 | &cpuset_attach_nodemask_to); | |
fff4dc84 TH |
1604 | else |
1605 | mmput(mm); | |
f047cecf | 1606 | } |
4225399a | 1607 | } |
452477fa | 1608 | |
33ad801d | 1609 | cs->old_mems_allowed = cpuset_attach_nodemask_to; |
02bb5863 | 1610 | |
452477fa | 1611 | cs->attach_in_progress--; |
e44193d3 LZ |
1612 | if (!cs->attach_in_progress) |
1613 | wake_up(&cpuset_attach_wq); | |
5d21cc2d TH |
1614 | |
1615 | mutex_unlock(&cpuset_mutex); | |
1da177e4 LT |
1616 | } |
1617 | ||
1618 | /* The various types of files and directories in a cpuset file system */ | |
1619 | ||
1620 | typedef enum { | |
45b07ef3 | 1621 | FILE_MEMORY_MIGRATE, |
1da177e4 LT |
1622 | FILE_CPULIST, |
1623 | FILE_MEMLIST, | |
afd1a8b3 LZ |
1624 | FILE_EFFECTIVE_CPULIST, |
1625 | FILE_EFFECTIVE_MEMLIST, | |
1da177e4 LT |
1626 | FILE_CPU_EXCLUSIVE, |
1627 | FILE_MEM_EXCLUSIVE, | |
78608366 | 1628 | FILE_MEM_HARDWALL, |
029190c5 | 1629 | FILE_SCHED_LOAD_BALANCE, |
1d3504fc | 1630 | FILE_SCHED_RELAX_DOMAIN_LEVEL, |
3e0d98b9 PJ |
1631 | FILE_MEMORY_PRESSURE_ENABLED, |
1632 | FILE_MEMORY_PRESSURE, | |
825a46af PJ |
1633 | FILE_SPREAD_PAGE, |
1634 | FILE_SPREAD_SLAB, | |
1cac41cb | 1635 | FILE_FAMILY_BOOST, |
1da177e4 LT |
1636 | } cpuset_filetype_t; |
1637 | ||
182446d0 TH |
1638 | static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, |
1639 | u64 val) | |
700fe1ab | 1640 | { |
182446d0 | 1641 | struct cpuset *cs = css_cs(css); |
700fe1ab | 1642 | cpuset_filetype_t type = cft->private; |
a903f086 | 1643 | int retval = 0; |
700fe1ab | 1644 | |
5d21cc2d | 1645 | mutex_lock(&cpuset_mutex); |
a903f086 LZ |
1646 | if (!is_cpuset_online(cs)) { |
1647 | retval = -ENODEV; | |
5d21cc2d | 1648 | goto out_unlock; |
a903f086 | 1649 | } |
700fe1ab PM |
1650 | |
1651 | switch (type) { | |
1da177e4 | 1652 | case FILE_CPU_EXCLUSIVE: |
700fe1ab | 1653 | retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); |
1da177e4 LT |
1654 | break; |
1655 | case FILE_MEM_EXCLUSIVE: | |
700fe1ab | 1656 | retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); |
1da177e4 | 1657 | break; |
78608366 PM |
1658 | case FILE_MEM_HARDWALL: |
1659 | retval = update_flag(CS_MEM_HARDWALL, cs, val); | |
1660 | break; | |
029190c5 | 1661 | case FILE_SCHED_LOAD_BALANCE: |
700fe1ab | 1662 | retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); |
1d3504fc | 1663 | break; |
45b07ef3 | 1664 | case FILE_MEMORY_MIGRATE: |
700fe1ab | 1665 | retval = update_flag(CS_MEMORY_MIGRATE, cs, val); |
45b07ef3 | 1666 | break; |
3e0d98b9 | 1667 | case FILE_MEMORY_PRESSURE_ENABLED: |
700fe1ab | 1668 | cpuset_memory_pressure_enabled = !!val; |
3e0d98b9 | 1669 | break; |
825a46af | 1670 | case FILE_SPREAD_PAGE: |
700fe1ab | 1671 | retval = update_flag(CS_SPREAD_PAGE, cs, val); |
825a46af PJ |
1672 | break; |
1673 | case FILE_SPREAD_SLAB: | |
700fe1ab | 1674 | retval = update_flag(CS_SPREAD_SLAB, cs, val); |
825a46af | 1675 | break; |
1cac41cb MB |
1676 | case FILE_FAMILY_BOOST: |
1677 | retval = update_flag(CS_FAMILY_BOOST, cs, val); | |
1678 | break; | |
1da177e4 LT |
1679 | default: |
1680 | retval = -EINVAL; | |
700fe1ab | 1681 | break; |
1da177e4 | 1682 | } |
5d21cc2d TH |
1683 | out_unlock: |
1684 | mutex_unlock(&cpuset_mutex); | |
1da177e4 LT |
1685 | return retval; |
1686 | } | |
1687 | ||
182446d0 TH |
1688 | static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, |
1689 | s64 val) | |
5be7a479 | 1690 | { |
182446d0 | 1691 | struct cpuset *cs = css_cs(css); |
5be7a479 | 1692 | cpuset_filetype_t type = cft->private; |
5d21cc2d | 1693 | int retval = -ENODEV; |
5be7a479 | 1694 | |
5d21cc2d TH |
1695 | mutex_lock(&cpuset_mutex); |
1696 | if (!is_cpuset_online(cs)) | |
1697 | goto out_unlock; | |
e3712395 | 1698 | |
5be7a479 PM |
1699 | switch (type) { |
1700 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | |
1701 | retval = update_relax_domain_level(cs, val); | |
1702 | break; | |
1703 | default: | |
1704 | retval = -EINVAL; | |
1705 | break; | |
1706 | } | |
5d21cc2d TH |
1707 | out_unlock: |
1708 | mutex_unlock(&cpuset_mutex); | |
5be7a479 PM |
1709 | return retval; |
1710 | } | |
1711 | ||
e3712395 PM |
1712 | /* |
1713 | * Common handling for a write to a "cpus" or "mems" file. | |
1714 | */ | |
451af504 TH |
1715 | static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, |
1716 | char *buf, size_t nbytes, loff_t off) | |
e3712395 | 1717 | { |
451af504 | 1718 | struct cpuset *cs = css_cs(of_css(of)); |
645fcc9d | 1719 | struct cpuset *trialcs; |
5d21cc2d | 1720 | int retval = -ENODEV; |
e3712395 | 1721 | |
451af504 TH |
1722 | buf = strstrip(buf); |
1723 | ||
3a5a6d0c TH |
1724 | /* |
1725 | * CPU or memory hotunplug may leave @cs w/o any execution | |
1726 | * resources, in which case the hotplug code asynchronously updates | |
1727 | * configuration and transfers all tasks to the nearest ancestor | |
1728 | * which can execute. | |
1729 | * | |
1730 | * As writes to "cpus" or "mems" may restore @cs's execution | |
1731 | * resources, wait for the previously scheduled operations before | |
1732 | * proceeding, so that we don't end up repeatedly removing tasks added |
1733 | * after execution capability is restored. | |
76bb5ab8 TH |
1734 | * |
1735 | * cpuset_hotplug_work calls back into cgroup core via | |
1736 | * cgroup_transfer_tasks() and waiting for it from a cgroupfs | |
1737 | * operation like this one can lead to a deadlock through kernfs | |
1738 | * active_ref protection. Let's break the protection. Losing the | |
1739 | * protection is okay as we check whether @cs is online after | |
1740 | * grabbing cpuset_mutex anyway. This only happens on the legacy | |
1741 | * hierarchies. | |
3a5a6d0c | 1742 | */ |
76bb5ab8 TH |
1743 | css_get(&cs->css); |
1744 | kernfs_break_active_protection(of->kn); | |
3a5a6d0c TH |
1745 | flush_work(&cpuset_hotplug_work); |
1746 | ||
5d21cc2d TH |
1747 | mutex_lock(&cpuset_mutex); |
1748 | if (!is_cpuset_online(cs)) | |
1749 | goto out_unlock; | |
e3712395 | 1750 | |
645fcc9d | 1751 | trialcs = alloc_trial_cpuset(cs); |
b75f38d6 LZ |
1752 | if (!trialcs) { |
1753 | retval = -ENOMEM; | |
5d21cc2d | 1754 | goto out_unlock; |
b75f38d6 | 1755 | } |
645fcc9d | 1756 | |
451af504 | 1757 | switch (of_cft(of)->private) { |
e3712395 | 1758 | case FILE_CPULIST: |
645fcc9d | 1759 | retval = update_cpumask(cs, trialcs, buf); |
e3712395 PM |
1760 | break; |
1761 | case FILE_MEMLIST: | |
645fcc9d | 1762 | retval = update_nodemask(cs, trialcs, buf); |
e3712395 PM |
1763 | break; |
1764 | default: | |
1765 | retval = -EINVAL; | |
1766 | break; | |
1767 | } | |
645fcc9d LZ |
1768 | |
1769 | free_trial_cpuset(trialcs); | |
5d21cc2d TH |
1770 | out_unlock: |
1771 | mutex_unlock(&cpuset_mutex); | |
76bb5ab8 TH |
1772 | kernfs_unbreak_active_protection(of->kn); |
1773 | css_put(&cs->css); | |
fff4dc84 | 1774 | flush_workqueue(cpuset_migrate_mm_wq); |
451af504 | 1775 | return retval ?: nbytes; |
e3712395 PM |
1776 | } |
1777 | ||
1da177e4 LT |
1778 | /* |
1779 | * These ascii lists should be read in a single call, by using a user | |
1780 | * buffer large enough to hold the entire map. If read in smaller | |
1781 | * chunks, there is no guarantee of atomicity. Since the display format | |
1782 | * used, list of ranges of sequential numbers, is variable length, | |
1783 | * and since these maps can change value dynamically, one could read | |
1784 | * gibberish by doing partial reads while a list was changing. | |
1da177e4 | 1785 | */ |
2da8ca82 | 1786 | static int cpuset_common_seq_show(struct seq_file *sf, void *v) |
1da177e4 | 1787 | { |
2da8ca82 TH |
1788 | struct cpuset *cs = css_cs(seq_css(sf)); |
1789 | cpuset_filetype_t type = seq_cft(sf)->private; | |
51ffe411 | 1790 | int ret = 0; |
1da177e4 | 1791 | |
8447a0fe | 1792 | spin_lock_irq(&callback_lock); |
1da177e4 LT |
1793 | |
1794 | switch (type) { | |
1795 | case FILE_CPULIST: | |
cd55f3c2 | 1796 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested)); |
1da177e4 LT |
1797 | break; |
1798 | case FILE_MEMLIST: | |
e8e6d97c | 1799 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); |
1da177e4 | 1800 | break; |
afd1a8b3 | 1801 | case FILE_EFFECTIVE_CPULIST: |
e8e6d97c | 1802 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); |
afd1a8b3 LZ |
1803 | break; |
1804 | case FILE_EFFECTIVE_MEMLIST: | |
e8e6d97c | 1805 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); |
afd1a8b3 | 1806 | break; |
1da177e4 | 1807 | default: |
51ffe411 | 1808 | ret = -EINVAL; |
1da177e4 | 1809 | } |
1da177e4 | 1810 | |
8447a0fe | 1811 | spin_unlock_irq(&callback_lock); |
51ffe411 | 1812 | return ret; |
1da177e4 LT |
1813 | } |
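To follow the single-read advice in the comment above cpuset_common_seq_show(), a user-space reader can pull the whole list in one read() into a buffer comfortably larger than any range list it expects; the mount point and group name below are assumptions for illustration only.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];		/* large enough for the whole range list */
	int fd = open("/sys/fs/cgroup/cpuset/demo/cpuset.cpus", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* one call, one consistent snapshot */
	close(fd);
	if (n <= 0)
		return 1;
	buf[n] = '\0';
	printf("cpus: %s", buf);
	return 0;
}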
1814 | ||
182446d0 | 1815 | static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) |
700fe1ab | 1816 | { |
182446d0 | 1817 | struct cpuset *cs = css_cs(css); |
700fe1ab PM |
1818 | cpuset_filetype_t type = cft->private; |
1819 | switch (type) { | |
1820 | case FILE_CPU_EXCLUSIVE: | |
1821 | return is_cpu_exclusive(cs); | |
1822 | case FILE_MEM_EXCLUSIVE: | |
1823 | return is_mem_exclusive(cs); | |
78608366 PM |
1824 | case FILE_MEM_HARDWALL: |
1825 | return is_mem_hardwall(cs); | |
700fe1ab PM |
1826 | case FILE_SCHED_LOAD_BALANCE: |
1827 | return is_sched_load_balance(cs); | |
1828 | case FILE_MEMORY_MIGRATE: | |
1829 | return is_memory_migrate(cs); | |
1830 | case FILE_MEMORY_PRESSURE_ENABLED: | |
1831 | return cpuset_memory_pressure_enabled; | |
1832 | case FILE_MEMORY_PRESSURE: | |
1833 | return fmeter_getrate(&cs->fmeter); | |
1834 | case FILE_SPREAD_PAGE: | |
1835 | return is_spread_page(cs); | |
1836 | case FILE_SPREAD_SLAB: | |
1837 | return is_spread_slab(cs); | |
1cac41cb MB |
1838 | case FILE_FAMILY_BOOST: |
1839 | return is_family_boost_enabled(cs); | |
700fe1ab PM |
1840 | default: |
1841 | BUG(); | |
1842 | } | |
cf417141 MK |
1843 | |
1844 | /* Unreachable but makes gcc happy */ | |
1845 | return 0; | |
700fe1ab | 1846 | } |
1da177e4 | 1847 | |
182446d0 | 1848 | static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) |
5be7a479 | 1849 | { |
182446d0 | 1850 | struct cpuset *cs = css_cs(css); |
5be7a479 PM |
1851 | cpuset_filetype_t type = cft->private; |
1852 | switch (type) { | |
1853 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | |
1854 | return cs->relax_domain_level; | |
1855 | default: | |
1856 | BUG(); | |
1857 | } | |
cf417141 MK |
1858 | |
1859 | /* Unreachable but makes gcc happy */ |
1860 | return 0; | |
5be7a479 PM |
1861 | } |
1862 | ||
1da177e4 LT |
1863 | |
1864 | /* | |
1865 | * for the common functions, 'private' gives the type of file | |
1866 | */ | |
1867 | ||
addf2c73 PM |
1868 | static struct cftype files[] = { |
1869 | { | |
1870 | .name = "cpus", | |
2da8ca82 | 1871 | .seq_show = cpuset_common_seq_show, |
451af504 | 1872 | .write = cpuset_write_resmask, |
e3712395 | 1873 | .max_write_len = (100U + 6 * NR_CPUS), |
addf2c73 PM |
1874 | .private = FILE_CPULIST, |
1875 | }, | |
1876 | ||
1877 | { | |
1878 | .name = "mems", | |
2da8ca82 | 1879 | .seq_show = cpuset_common_seq_show, |
451af504 | 1880 | .write = cpuset_write_resmask, |
e3712395 | 1881 | .max_write_len = (100U + 6 * MAX_NUMNODES), |
addf2c73 PM |
1882 | .private = FILE_MEMLIST, |
1883 | }, | |
1884 | ||
afd1a8b3 LZ |
1885 | { |
1886 | .name = "effective_cpus", | |
1887 | .seq_show = cpuset_common_seq_show, | |
1888 | .private = FILE_EFFECTIVE_CPULIST, | |
1889 | }, | |
1890 | ||
1891 | { | |
1892 | .name = "effective_mems", | |
1893 | .seq_show = cpuset_common_seq_show, | |
1894 | .private = FILE_EFFECTIVE_MEMLIST, | |
1895 | }, | |
1896 | ||
addf2c73 PM |
1897 | { |
1898 | .name = "cpu_exclusive", | |
1899 | .read_u64 = cpuset_read_u64, | |
1900 | .write_u64 = cpuset_write_u64, | |
1901 | .private = FILE_CPU_EXCLUSIVE, | |
1902 | }, | |
1903 | ||
1904 | { | |
1905 | .name = "mem_exclusive", | |
1906 | .read_u64 = cpuset_read_u64, | |
1907 | .write_u64 = cpuset_write_u64, | |
1908 | .private = FILE_MEM_EXCLUSIVE, | |
1909 | }, | |
1910 | ||
78608366 PM |
1911 | { |
1912 | .name = "mem_hardwall", | |
1913 | .read_u64 = cpuset_read_u64, | |
1914 | .write_u64 = cpuset_write_u64, | |
1915 | .private = FILE_MEM_HARDWALL, | |
1916 | }, | |
1917 | ||
addf2c73 PM |
1918 | { |
1919 | .name = "sched_load_balance", | |
1920 | .read_u64 = cpuset_read_u64, | |
1921 | .write_u64 = cpuset_write_u64, | |
1922 | .private = FILE_SCHED_LOAD_BALANCE, | |
1923 | }, | |
1924 | ||
1925 | { | |
1926 | .name = "sched_relax_domain_level", | |
5be7a479 PM |
1927 | .read_s64 = cpuset_read_s64, |
1928 | .write_s64 = cpuset_write_s64, | |
addf2c73 PM |
1929 | .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, |
1930 | }, | |
1931 | ||
1932 | { | |
1933 | .name = "memory_migrate", | |
1934 | .read_u64 = cpuset_read_u64, | |
1935 | .write_u64 = cpuset_write_u64, | |
1936 | .private = FILE_MEMORY_MIGRATE, | |
1937 | }, | |
1938 | ||
1939 | { | |
1940 | .name = "memory_pressure", | |
1941 | .read_u64 = cpuset_read_u64, | |
ed48d923 | 1942 | .private = FILE_MEMORY_PRESSURE, |
addf2c73 PM |
1943 | }, |
1944 | ||
1945 | { | |
1946 | .name = "memory_spread_page", | |
1947 | .read_u64 = cpuset_read_u64, | |
1948 | .write_u64 = cpuset_write_u64, | |
1949 | .private = FILE_SPREAD_PAGE, | |
1950 | }, | |
1951 | ||
1952 | { | |
1953 | .name = "memory_spread_slab", | |
1954 | .read_u64 = cpuset_read_u64, | |
1955 | .write_u64 = cpuset_write_u64, | |
1956 | .private = FILE_SPREAD_SLAB, | |
1957 | }, | |
3e0d98b9 | 1958 | |
4baf6e33 TH |
1959 | { |
1960 | .name = "memory_pressure_enabled", | |
1961 | .flags = CFTYPE_ONLY_ON_ROOT, | |
1962 | .read_u64 = cpuset_read_u64, | |
1963 | .write_u64 = cpuset_write_u64, | |
1964 | .private = FILE_MEMORY_PRESSURE_ENABLED, | |
1965 | }, | |
1da177e4 | 1966 | |
1cac41cb MB |
1967 | { |
1968 | .name = "family_boost", | |
1969 | .read_u64 = cpuset_read_u64, | |
1970 | .write_u64 = cpuset_write_u64, | |
1971 | .private = FILE_FAMILY_BOOST, | |
1972 | }, | |
1973 | ||
1974 | ||
4baf6e33 TH |
1975 | { } /* terminate */ |
1976 | }; | |
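On a legacy hierarchy these cftype entries appear as "cpuset."-prefixed files in each cpuset directory. As a usage sketch (the mount point and the "demo" group are assumptions about a typical setup, not anything this file defines), a user-space program could configure a cpuset like this:

#include <stdio.h>

/* write a small string to a cgroup control file */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* "cpus" and "mems" writes are handled by cpuset_write_resmask() */
	write_str("/sys/fs/cgroup/cpuset/demo/cpuset.cpus", "0-3");
	write_str("/sys/fs/cgroup/cpuset/demo/cpuset.mems", "0");
	/* flag files such as "memory_migrate" go through cpuset_write_u64() */
	write_str("/sys/fs/cgroup/cpuset/demo/cpuset.memory_migrate", "1");
	return 0;
}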
1da177e4 LT |
1977 | |
1978 | /* | |
92fb9748 | 1979 | * cpuset_css_alloc - allocate a cpuset css |
c9e5fe66 | 1980 | * cgrp: control group that the new cpuset will be part of |
1da177e4 LT |
1981 | */ |
1982 | ||
eb95419b TH |
1983 | static struct cgroup_subsys_state * |
1984 | cpuset_css_alloc(struct cgroup_subsys_state *parent_css) | |
1da177e4 | 1985 | { |
c8f699bb | 1986 | struct cpuset *cs; |
1da177e4 | 1987 | |
eb95419b | 1988 | if (!parent_css) |
8793d854 | 1989 | return &top_cpuset.css; |
033fa1c5 | 1990 | |
c8f699bb | 1991 | cs = kzalloc(sizeof(*cs), GFP_KERNEL); |
1da177e4 | 1992 | if (!cs) |
8793d854 | 1993 | return ERR_PTR(-ENOMEM); |
e2b9a3d7 LZ |
1994 | if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) |
1995 | goto free_cs; | |
cd55f3c2 RA |
1996 | if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL)) |
1997 | goto free_allowed; | |
e2b9a3d7 | 1998 | if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL)) |
cd55f3c2 | 1999 | goto free_requested; |
1da177e4 | 2000 | |
029190c5 | 2001 | set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
300ed6cb | 2002 | cpumask_clear(cs->cpus_allowed); |
cd55f3c2 | 2003 | cpumask_clear(cs->cpus_requested); |
f9a86fcb | 2004 | nodes_clear(cs->mems_allowed); |
e2b9a3d7 LZ |
2005 | cpumask_clear(cs->effective_cpus); |
2006 | nodes_clear(cs->effective_mems); | |
3e0d98b9 | 2007 | fmeter_init(&cs->fmeter); |
1d3504fc | 2008 | cs->relax_domain_level = -1; |
1da177e4 | 2009 | |
c8f699bb | 2010 | return &cs->css; |
e2b9a3d7 | 2011 | |
cd55f3c2 RA |
2012 | free_requested: |
2013 | free_cpumask_var(cs->cpus_requested); | |
2014 | free_allowed: | |
e2b9a3d7 LZ |
2015 | free_cpumask_var(cs->cpus_allowed); |
2016 | free_cs: | |
2017 | kfree(cs); | |
2018 | return ERR_PTR(-ENOMEM); | |
c8f699bb TH |
2019 | } |
2020 | ||
eb95419b | 2021 | static int cpuset_css_online(struct cgroup_subsys_state *css) |
c8f699bb | 2022 | { |
eb95419b | 2023 | struct cpuset *cs = css_cs(css); |
c431069f | 2024 | struct cpuset *parent = parent_cs(cs); |
ae8086ce | 2025 | struct cpuset *tmp_cs; |
492eb21b | 2026 | struct cgroup_subsys_state *pos_css; |
c8f699bb TH |
2027 | |
2028 | if (!parent) | |
2029 | return 0; | |
2030 | ||
5d21cc2d TH |
2031 | mutex_lock(&cpuset_mutex); |
2032 | ||
efeb77b2 | 2033 | set_bit(CS_ONLINE, &cs->flags); |
c8f699bb TH |
2034 | if (is_spread_page(parent)) |
2035 | set_bit(CS_SPREAD_PAGE, &cs->flags); | |
2036 | if (is_spread_slab(parent)) | |
2037 | set_bit(CS_SPREAD_SLAB, &cs->flags); | |
1da177e4 | 2038 | |
664eedde | 2039 | cpuset_inc(); |
033fa1c5 | 2040 | |
8447a0fe | 2041 | spin_lock_irq(&callback_lock); |
9e10a130 | 2042 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { |
e2b9a3d7 LZ |
2043 | cpumask_copy(cs->effective_cpus, parent->effective_cpus); |
2044 | cs->effective_mems = parent->effective_mems; | |
2045 | } | |
8447a0fe | 2046 | spin_unlock_irq(&callback_lock); |
e2b9a3d7 | 2047 | |
eb95419b | 2048 | if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) |
5d21cc2d | 2049 | goto out_unlock; |
033fa1c5 TH |
2050 | |
2051 | /* | |
2052 | * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is | |
2053 | * set. This flag handling is implemented in cgroup core for | |
2054 | * historical reasons - the flag may be specified during mount. |
2055 | * | |
2056 | * Currently, if any sibling cpusets have exclusive cpus or mem, we | |
2057 | * refuse to clone the configuration - thereby refusing the task to | |
2058 | * be entered, and as a result refusing the sys_unshare() or | |
2059 | * clone() which initiated it. If this becomes a problem for some | |
2060 | * users who wish to allow that scenario, then this could be | |
2061 | * changed to grant parent->cpus_allowed-sibling_cpus_exclusive | |
2062 | * (and likewise for mems) to the new cgroup. | |
2063 | */ | |
ae8086ce | 2064 | rcu_read_lock(); |
492eb21b | 2065 | cpuset_for_each_child(tmp_cs, pos_css, parent) { |
ae8086ce TH |
2066 | if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { |
2067 | rcu_read_unlock(); | |
5d21cc2d | 2068 | goto out_unlock; |
ae8086ce | 2069 | } |
033fa1c5 | 2070 | } |
ae8086ce | 2071 | rcu_read_unlock(); |
033fa1c5 | 2072 | |
8447a0fe | 2073 | spin_lock_irq(&callback_lock); |
033fa1c5 | 2074 | cs->mems_allowed = parent->mems_allowed; |
790317e1 | 2075 | cs->effective_mems = parent->mems_allowed; |
033fa1c5 | 2076 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
cd55f3c2 | 2077 | cpumask_copy(cs->cpus_requested, parent->cpus_requested); |
790317e1 | 2078 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); |
cea74465 | 2079 | spin_unlock_irq(&callback_lock); |
5d21cc2d TH |
2080 | out_unlock: |
2081 | mutex_unlock(&cpuset_mutex); | |
c8f699bb TH |
2082 | return 0; |
2083 | } | |
2084 | ||
0b9e6965 ZH |
2085 | /* |
2086 | * If the cpuset being removed has its flag 'sched_load_balance' | |
2087 | * enabled, then simulate turning sched_load_balance off, which | |
2088 | * will call rebuild_sched_domains_locked(). | |
2089 | */ | |
2090 | ||
eb95419b | 2091 | static void cpuset_css_offline(struct cgroup_subsys_state *css) |
c8f699bb | 2092 | { |
eb95419b | 2093 | struct cpuset *cs = css_cs(css); |
c8f699bb | 2094 | |
5d21cc2d | 2095 | mutex_lock(&cpuset_mutex); |
c8f699bb TH |
2096 | |
2097 | if (is_sched_load_balance(cs)) | |
2098 | update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); | |
2099 | ||
664eedde | 2100 | cpuset_dec(); |
efeb77b2 | 2101 | clear_bit(CS_ONLINE, &cs->flags); |
c8f699bb | 2102 | |
5d21cc2d | 2103 | mutex_unlock(&cpuset_mutex); |
1da177e4 LT |
2104 | } |
2105 | ||
eb95419b | 2106 | static void cpuset_css_free(struct cgroup_subsys_state *css) |
1da177e4 | 2107 | { |
eb95419b | 2108 | struct cpuset *cs = css_cs(css); |
1da177e4 | 2109 | |
e2b9a3d7 | 2110 | free_cpumask_var(cs->effective_cpus); |
300ed6cb | 2111 | free_cpumask_var(cs->cpus_allowed); |
cd55f3c2 | 2112 | free_cpumask_var(cs->cpus_requested); |
8793d854 | 2113 | kfree(cs); |
1da177e4 LT |
2114 | } |
2115 | ||
39bd0d15 LZ |
2116 | static void cpuset_bind(struct cgroup_subsys_state *root_css) |
2117 | { | |
2118 | mutex_lock(&cpuset_mutex); | |
8447a0fe | 2119 | spin_lock_irq(&callback_lock); |
39bd0d15 | 2120 | |
9e10a130 | 2121 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { |
39bd0d15 LZ |
2122 | cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); |
2123 | top_cpuset.mems_allowed = node_possible_map; | |
2124 | } else { | |
2125 | cpumask_copy(top_cpuset.cpus_allowed, | |
2126 | top_cpuset.effective_cpus); | |
2127 | top_cpuset.mems_allowed = top_cpuset.effective_mems; | |
2128 | } | |
2129 | ||
8447a0fe | 2130 | spin_unlock_irq(&callback_lock); |
39bd0d15 LZ |
2131 | mutex_unlock(&cpuset_mutex); |
2132 | } | |
2133 | ||
06ec7a1d ZL |
2134 | /* |
2135 | * Make sure the new task conforms to the current state of its parent, |
2136 | * which could have been changed by cpuset just after it inherits the | |
2137 | * state from the parent and before it sits on the cgroup's task list. | |
2138 | */ | |
29bd0359 | 2139 | void cpuset_fork(struct task_struct *task, void *priv) |
06ec7a1d ZL |
2140 | { |
2141 | if (task_css_is_root(task, cpuset_cgrp_id)) | |
2142 | return; | |
2143 | ||
2144 | set_cpus_allowed_ptr(task, ¤t->cpus_allowed); | |
2145 | task->mems_allowed = current->mems_allowed; | |
2146 | } | |
2147 | ||
1cac41cb MB |
2148 | static int cpuset_allow_attach(struct cgroup_taskset *tset) |
2149 | { | |
2150 | const struct cred *cred = current_cred(), *tcred; | |
2151 | struct task_struct *task; | |
2152 | struct cgroup_subsys_state *css; | |
2153 | ||
2154 | cgroup_taskset_for_each(task, css, tset) { | |
2155 | tcred = __task_cred(task); | |
2156 | ||
2157 | if ((current != task) && !capable(CAP_SYS_ADMIN) && | |
2158 | cred->euid.val != tcred->uid.val && cred->euid.val != tcred->suid.val) | |
2159 | return -EACCES; | |
2160 | } | |
2161 | ||
2162 | return 0; | |
2163 | } | |
2164 | ||
073219e9 | 2165 | struct cgroup_subsys cpuset_cgrp_subsys = { |
39bd0d15 LZ |
2166 | .css_alloc = cpuset_css_alloc, |
2167 | .css_online = cpuset_css_online, | |
2168 | .css_offline = cpuset_css_offline, | |
2169 | .css_free = cpuset_css_free, | |
2170 | .can_attach = cpuset_can_attach, | |
1cac41cb | 2171 | .allow_attach = cpuset_allow_attach, |
39bd0d15 LZ |
2172 | .cancel_attach = cpuset_cancel_attach, |
2173 | .attach = cpuset_attach, | |
d5209747 | 2174 | .post_attach = cpuset_post_attach, |
39bd0d15 | 2175 | .bind = cpuset_bind, |
06ec7a1d | 2176 | .fork = cpuset_fork, |
5577964e | 2177 | .legacy_cftypes = files, |
39bd0d15 | 2178 | .early_init = 1, |
8793d854 PM |
2179 | }; |
2180 | ||
1da177e4 LT |
2181 | /** |
2182 | * cpuset_init - initialize cpusets at system boot | |
2183 | * | |
2184 | * Description: Initialize top_cpuset and the cpuset internal file system, | |
2185 | **/ | |
2186 | ||
2187 | int __init cpuset_init(void) | |
2188 | { | |
8793d854 | 2189 | int err = 0; |
1da177e4 | 2190 | |
58568d2a MX |
2191 | if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)) |
2192 | BUG(); | |
e2b9a3d7 LZ |
2193 | if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)) |
2194 | BUG(); | |
cd55f3c2 RA |
2195 | if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL)) |
2196 | BUG(); | |
58568d2a | 2197 | |
300ed6cb | 2198 | cpumask_setall(top_cpuset.cpus_allowed); |
cd55f3c2 | 2199 | cpumask_setall(top_cpuset.cpus_requested); |
f9a86fcb | 2200 | nodes_setall(top_cpuset.mems_allowed); |
e2b9a3d7 LZ |
2201 | cpumask_setall(top_cpuset.effective_cpus); |
2202 | nodes_setall(top_cpuset.effective_mems); | |
1da177e4 | 2203 | |
3e0d98b9 | 2204 | fmeter_init(&top_cpuset.fmeter); |
029190c5 | 2205 | set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); |
1d3504fc | 2206 | top_cpuset.relax_domain_level = -1; |
1da177e4 | 2207 | |
1da177e4 LT |
2208 | err = register_filesystem(&cpuset_fs_type); |
2209 | if (err < 0) | |
8793d854 PM |
2210 | return err; |
2211 | ||
2341d1b6 LZ |
2212 | if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)) |
2213 | BUG(); | |
2214 | ||
8793d854 | 2215 | return 0; |
1da177e4 LT |
2216 | } |
2217 | ||
b1aac8bb | 2218 | /* |
cf417141 | 2219 | * If CPU and/or memory hotplug handlers, below, unplug any CPUs |
b1aac8bb PJ |
2220 | * or memory nodes, we need to walk over the cpuset hierarchy, |
2221 | * removing that CPU or node from all cpusets. If this removes the | |
956db3ca CW |
2222 | * last CPU or node from a cpuset, then move the tasks in the empty |
2223 | * cpuset to its next-highest non-empty parent. | |
b1aac8bb | 2224 | */ |
956db3ca CW |
2225 | static void remove_tasks_in_empty_cpuset(struct cpuset *cs) |
2226 | { | |
2227 | struct cpuset *parent; | |
2228 | ||
956db3ca CW |
2229 | /* |
2230 | * Find its next-highest non-empty parent (the top cpuset |
2231 | * has online cpus, so it can't be empty). |
2232 | */ | |
c431069f | 2233 | parent = parent_cs(cs); |
300ed6cb | 2234 | while (cpumask_empty(parent->cpus_allowed) || |
b4501295 | 2235 | nodes_empty(parent->mems_allowed)) |
c431069f | 2236 | parent = parent_cs(parent); |
956db3ca | 2237 | |
8cc99345 | 2238 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { |
12d3089c | 2239 | pr_err("cpuset: failed to transfer tasks out of empty cpuset "); |
e61734c5 TH |
2240 | pr_cont_cgroup_name(cs->css.cgroup); |
2241 | pr_cont("\n"); | |
8cc99345 | 2242 | } |
956db3ca CW |
2243 | } |
2244 | ||
be4c9dd7 LZ |
2245 | static void |
2246 | hotplug_update_tasks_legacy(struct cpuset *cs, | |
2247 | struct cpumask *new_cpus, nodemask_t *new_mems, | |
2248 | bool cpus_updated, bool mems_updated) | |
390a36aa LZ |
2249 | { |
2250 | bool is_empty; | |
2251 | ||
8447a0fe | 2252 | spin_lock_irq(&callback_lock); |
be4c9dd7 LZ |
2253 | cpumask_copy(cs->cpus_allowed, new_cpus); |
2254 | cpumask_copy(cs->effective_cpus, new_cpus); | |
2255 | cs->mems_allowed = *new_mems; | |
2256 | cs->effective_mems = *new_mems; | |
8447a0fe | 2257 | spin_unlock_irq(&callback_lock); |
390a36aa LZ |
2258 | |
2259 | /* | |
2260 | * Don't call update_tasks_cpumask() if the cpuset becomes empty, | |
2261 | * as the tasks will be migrated to an ancestor. |
2262 | */ | |
be4c9dd7 | 2263 | if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) |
390a36aa | 2264 | update_tasks_cpumask(cs); |
be4c9dd7 | 2265 | if (mems_updated && !nodes_empty(cs->mems_allowed)) |
390a36aa LZ |
2266 | update_tasks_nodemask(cs); |
2267 | ||
2268 | is_empty = cpumask_empty(cs->cpus_allowed) || | |
2269 | nodes_empty(cs->mems_allowed); | |
2270 | ||
2271 | mutex_unlock(&cpuset_mutex); | |
2272 | ||
2273 | /* | |
2274 | * Move tasks to the nearest ancestor with execution resources. |
2275 | * This is a full cgroup operation which will also call back into |
2276 | * cpuset. Should be done outside any lock. | |
2277 | */ | |
2278 | if (is_empty) | |
2279 | remove_tasks_in_empty_cpuset(cs); | |
2280 | ||
2281 | mutex_lock(&cpuset_mutex); | |
2282 | } | |
2283 | ||
be4c9dd7 LZ |
2284 | static void |
2285 | hotplug_update_tasks(struct cpuset *cs, | |
2286 | struct cpumask *new_cpus, nodemask_t *new_mems, | |
2287 | bool cpus_updated, bool mems_updated) | |
390a36aa | 2288 | { |
be4c9dd7 LZ |
2289 | if (cpumask_empty(new_cpus)) |
2290 | cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); | |
2291 | if (nodes_empty(*new_mems)) | |
2292 | *new_mems = parent_cs(cs)->effective_mems; | |
2293 | ||
8447a0fe | 2294 | spin_lock_irq(&callback_lock); |
be4c9dd7 LZ |
2295 | cpumask_copy(cs->effective_cpus, new_cpus); |
2296 | cs->effective_mems = *new_mems; | |
8447a0fe | 2297 | spin_unlock_irq(&callback_lock); |
390a36aa | 2298 | |
be4c9dd7 | 2299 | if (cpus_updated) |
390a36aa | 2300 | update_tasks_cpumask(cs); |
be4c9dd7 | 2301 | if (mems_updated) |
390a36aa LZ |
2302 | update_tasks_nodemask(cs); |
2303 | } | |
2304 | ||
deb7aa30 | 2305 | /** |
388afd85 | 2306 | * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug |
deb7aa30 | 2307 | * @cs: cpuset in interest |
956db3ca | 2308 | * |
deb7aa30 TH |
2309 | * Compare @cs's cpu and mem masks against top_cpuset and if some have gone |
2310 | * offline, update @cs accordingly. If @cs ends up with no CPU or memory, | |
2311 | * all its tasks are moved to the nearest ancestor with both resources. | |
80d1fa64 | 2312 | */ |
388afd85 | 2313 | static void cpuset_hotplug_update_tasks(struct cpuset *cs) |
80d1fa64 | 2314 | { |
be4c9dd7 LZ |
2315 | static cpumask_t new_cpus; |
2316 | static nodemask_t new_mems; | |
2317 | bool cpus_updated; | |
2318 | bool mems_updated; | |
e44193d3 LZ |
2319 | retry: |
2320 | wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); | |
80d1fa64 | 2321 | |
5d21cc2d | 2322 | mutex_lock(&cpuset_mutex); |
7ddf96b0 | 2323 | |
e44193d3 LZ |
2324 | /* |
2325 | * We have raced with task attaching. We wait until attaching | |
2326 | * is finished, so we won't attach a task to an empty cpuset. | |
2327 | */ | |
2328 | if (cs->attach_in_progress) { | |
2329 | mutex_unlock(&cpuset_mutex); | |
2330 | goto retry; | |
2331 | } | |
2332 | ||
cd55f3c2 | 2333 | cpumask_and(&new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus); |
be4c9dd7 | 2334 | nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems); |
80d1fa64 | 2335 | |
be4c9dd7 LZ |
2336 | cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); |
2337 | mems_updated = !nodes_equal(new_mems, cs->effective_mems); | |
deb7aa30 | 2338 | |
9e10a130 | 2339 | if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) |
be4c9dd7 LZ |
2340 | hotplug_update_tasks(cs, &new_cpus, &new_mems, |
2341 | cpus_updated, mems_updated); | |
390a36aa | 2342 | else |
be4c9dd7 LZ |
2343 | hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, |
2344 | cpus_updated, mems_updated); | |
8d033948 | 2345 | |
5d21cc2d | 2346 | mutex_unlock(&cpuset_mutex); |
b1aac8bb PJ |
2347 | } |
2348 | ||
90fd6738 PZ |
2349 | static bool force_rebuild; |
2350 | ||
2351 | void cpuset_force_rebuild(void) | |
2352 | { | |
2353 | force_rebuild = true; | |
2354 | } | |
2355 | ||
deb7aa30 | 2356 | /** |
3a5a6d0c | 2357 | * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset |
956db3ca | 2358 | * |
deb7aa30 TH |
2359 | * This function is called after either CPU or memory configuration has |
2360 | * changed and updates cpuset accordingly. The top_cpuset is always | |
2361 | * synchronized to cpu_active_mask and N_MEMORY, which is necessary in | |
2362 | * order to make cpusets transparent (of no effect) on systems that are |
2363 | * actively using CPU hotplug but making no active use of cpusets. | |
956db3ca | 2364 | * |
deb7aa30 | 2365 | * Non-root cpusets are only affected by offlining. If any CPUs or memory |
388afd85 LZ |
2366 | * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on |
2367 | * all descendants. | |
956db3ca | 2368 | * |
deb7aa30 TH |
2369 | * Note that CPU offlining during suspend is ignored. We don't modify |
2370 | * cpusets across suspend/resume cycles at all. | |
956db3ca | 2371 | */ |
3a5a6d0c | 2372 | static void cpuset_hotplug_workfn(struct work_struct *work) |
b1aac8bb | 2373 | { |
5c5cc623 LZ |
2374 | static cpumask_t new_cpus; |
2375 | static nodemask_t new_mems; | |
deb7aa30 | 2376 | bool cpus_updated, mems_updated; |
9e10a130 | 2377 | bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys); |
b1aac8bb | 2378 | |
5d21cc2d | 2379 | mutex_lock(&cpuset_mutex); |
956db3ca | 2380 | |
deb7aa30 TH |
2381 | /* fetch the available cpus/mems and find out which changed how */ |
2382 | cpumask_copy(&new_cpus, cpu_active_mask); | |
2383 | new_mems = node_states[N_MEMORY]; | |
7ddf96b0 | 2384 | |
7e88291b LZ |
2385 | cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); |
2386 | mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); | |
7ddf96b0 | 2387 | |
deb7aa30 TH |
2388 | /* synchronize cpus_allowed to cpu_active_mask */ |
2389 | if (cpus_updated) { | |
8447a0fe | 2390 | spin_lock_irq(&callback_lock); |
7e88291b LZ |
2391 | if (!on_dfl) |
2392 | cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); | |
1344ab9c | 2393 | cpumask_copy(top_cpuset.effective_cpus, &new_cpus); |
8447a0fe | 2394 | spin_unlock_irq(&callback_lock); |
deb7aa30 TH |
2395 | /* we don't mess with cpumasks of tasks in top_cpuset */ |
2396 | } | |
b4501295 | 2397 | |
deb7aa30 TH |
2398 | /* synchronize mems_allowed to N_MEMORY */ |
2399 | if (mems_updated) { | |
8447a0fe | 2400 | spin_lock_irq(&callback_lock); |
7e88291b LZ |
2401 | if (!on_dfl) |
2402 | top_cpuset.mems_allowed = new_mems; | |
1344ab9c | 2403 | top_cpuset.effective_mems = new_mems; |
8447a0fe | 2404 | spin_unlock_irq(&callback_lock); |
d66393e5 | 2405 | update_tasks_nodemask(&top_cpuset); |
deb7aa30 | 2406 | } |
b4501295 | 2407 | |
388afd85 LZ |
2408 | mutex_unlock(&cpuset_mutex); |
2409 | ||
5c5cc623 LZ |
2410 | /* if cpus or mems changed, we need to propagate to descendants */ |
2411 | if (cpus_updated || mems_updated) { | |
deb7aa30 | 2412 | struct cpuset *cs; |
492eb21b | 2413 | struct cgroup_subsys_state *pos_css; |
f9b4fb8d | 2414 | |
fc560a26 | 2415 | rcu_read_lock(); |
492eb21b | 2416 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
ec903c0c | 2417 | if (cs == &top_cpuset || !css_tryget_online(&cs->css)) |
388afd85 LZ |
2418 | continue; |
2419 | rcu_read_unlock(); | |
7ddf96b0 | 2420 | |
388afd85 | 2421 | cpuset_hotplug_update_tasks(cs); |
b4501295 | 2422 | |
388afd85 LZ |
2423 | rcu_read_lock(); |
2424 | css_put(&cs->css); | |
2425 | } | |
2426 | rcu_read_unlock(); | |
2427 | } | |
8d033948 | 2428 | |
deb7aa30 | 2429 | /* rebuild sched domains if cpus_allowed has changed */ |
90fd6738 PZ |
2430 | if (cpus_updated || force_rebuild) { |
2431 | force_rebuild = false; | |
e0e80a02 | 2432 | rebuild_sched_domains(); |
90fd6738 | 2433 | } |
b1aac8bb PJ |
2434 | } |
2435 | ||
7ddf96b0 | 2436 | void cpuset_update_active_cpus(bool cpu_online) |
4c4d50f7 | 2437 | { |
3a5a6d0c TH |
2438 | /* |
2439 | * We're inside cpu hotplug critical region which usually nests | |
2440 | * inside cgroup synchronization. Bounce actual hotplug processing | |
2441 | * to a work item to avoid reverse locking order. | |
2442 | * | |
2443 | * We still need to do partition_sched_domains() synchronously; | |
2444 | * otherwise, the scheduler will get confused and put tasks to the | |
2445 | * dead CPU. Fall back to the default single domain. | |
2446 | * cpuset_hotplug_workfn() will rebuild it as necessary. | |
2447 | */ | |
2448 | partition_sched_domains(1, NULL, NULL); | |
2449 | schedule_work(&cpuset_hotplug_work); | |
4c4d50f7 | 2450 | } |
4c4d50f7 | 2451 | |
90fd6738 PZ |
2452 | void cpuset_wait_for_hotplug(void) |
2453 | { | |
2454 | flush_work(&cpuset_hotplug_work); | |
2455 | } | |
2456 | ||
38837fc7 | 2457 | /* |
38d7bee9 LJ |
2458 | * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. |
2459 | * Call this routine anytime after node_states[N_MEMORY] changes. | |
a1cd2b13 | 2460 | * See cpuset_update_active_cpus() for CPU hotplug handling. |
38837fc7 | 2461 | */ |
f481891f MX |
2462 | static int cpuset_track_online_nodes(struct notifier_block *self, |
2463 | unsigned long action, void *arg) | |
38837fc7 | 2464 | { |
3a5a6d0c | 2465 | schedule_work(&cpuset_hotplug_work); |
f481891f | 2466 | return NOTIFY_OK; |
38837fc7 | 2467 | } |
d8f10cb3 AM |
2468 | |
2469 | static struct notifier_block cpuset_track_online_nodes_nb = { | |
2470 | .notifier_call = cpuset_track_online_nodes, | |
2471 | .priority = 10, /* ??! */ | |
2472 | }; | |
38837fc7 | 2473 | |
1da177e4 LT |
2474 | /** |
2475 | * cpuset_init_smp - initialize cpus_allowed | |
2476 | * | |
2477 | * Description: Finish top cpuset after cpu, node maps are initialized | |
d8f10cb3 | 2478 | */ |
1da177e4 LT |
2479 | void __init cpuset_init_smp(void) |
2480 | { | |
6ad4c188 | 2481 | cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); |
38d7bee9 | 2482 | top_cpuset.mems_allowed = node_states[N_MEMORY]; |
33ad801d | 2483 | top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; |
4c4d50f7 | 2484 | |
e2b9a3d7 LZ |
2485 | cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); |
2486 | top_cpuset.effective_mems = node_states[N_MEMORY]; | |
2487 | ||
d8f10cb3 | 2488 | register_hotmemory_notifier(&cpuset_track_online_nodes_nb); |
fff4dc84 TH |
2489 | |
2490 | cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); | |
2491 | BUG_ON(!cpuset_migrate_mm_wq); | |
1da177e4 LT |
2492 | } |
2493 | ||
2494 | /** | |
1da177e4 LT |
2495 | * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. |
2496 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. | |
6af866af | 2497 | * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. |
1da177e4 | 2498 | * |
300ed6cb | 2499 | * Description: Returns the cpumask_var_t cpus_allowed of the cpuset |
1da177e4 | 2500 | * attached to the specified @tsk. Guaranteed to return some non-empty |
5f054e31 | 2501 | * subset of cpu_online_mask, even if this means going outside the |
1da177e4 LT |
2502 | * tasks cpuset. |
2503 | **/ | |
2504 | ||
6af866af | 2505 | void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) |
1da177e4 | 2506 | { |
8447a0fe VD |
2507 | unsigned long flags; |
2508 | ||
2509 | spin_lock_irqsave(&callback_lock, flags); | |
b8dadcb5 | 2510 | rcu_read_lock(); |
ae1c8023 | 2511 | guarantee_online_cpus(task_cs(tsk), pmask); |
b8dadcb5 | 2512 | rcu_read_unlock(); |
8447a0fe | 2513 | spin_unlock_irqrestore(&callback_lock, flags); |
1da177e4 LT |
2514 | } |
2515 | ||
2baab4e9 | 2516 | void cpuset_cpus_allowed_fallback(struct task_struct *tsk) |
9084bb82 | 2517 | { |
9084bb82 | 2518 | rcu_read_lock(); |
ae1c8023 | 2519 | do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus); |
9084bb82 ON |
2520 | rcu_read_unlock(); |
2521 | ||
2522 | /* | |
2523 | * We own tsk->cpus_allowed, nobody can change it under us. | |
2524 | * | |
2525 | * But we used cs && cs->cpus_allowed lockless and thus can | |
2526 | * race with cgroup_attach_task() or update_cpumask() and get | |
2527 | * the wrong tsk->cpus_allowed. However, both cases imply the | |
2528 | * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() | |
2529 | * which takes task_rq_lock(). | |
2530 | * | |
2531 | * If we are called after it dropped the lock we must see all | |
2532 | * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily |
2533 | * set any mask even if it is not right from task_cs() pov, | |
2534 | * the pending set_cpus_allowed_ptr() will fix things. | |
2baab4e9 PZ |
2535 | * |
2536 | * select_fallback_rq() will fix things up and set cpu_possible_mask |
2537 | * if required. | |
9084bb82 | 2538 | */ |
9084bb82 ON |
2539 | } |
2540 | ||
8f4ab07f | 2541 | void __init cpuset_init_current_mems_allowed(void) |
1da177e4 | 2542 | { |
f9a86fcb | 2543 | nodes_setall(current->mems_allowed); |
1da177e4 LT |
2544 | } |
2545 | ||
909d75a3 PJ |
2546 | /** |
2547 | * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset. | |
2548 | * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. | |
2549 | * | |
2550 | * Description: Returns the nodemask_t mems_allowed of the cpuset | |
2551 | * attached to the specified @tsk. Guaranteed to return some non-empty | |
38d7bee9 | 2552 | * subset of node_states[N_MEMORY], even if this means going outside the |
909d75a3 PJ |
2553 | * tasks cpuset. |
2554 | **/ | |
2555 | ||
2556 | nodemask_t cpuset_mems_allowed(struct task_struct *tsk) | |
2557 | { | |
2558 | nodemask_t mask; | |
8447a0fe | 2559 | unsigned long flags; |
909d75a3 | 2560 | |
8447a0fe | 2561 | spin_lock_irqsave(&callback_lock, flags); |
b8dadcb5 | 2562 | rcu_read_lock(); |
ae1c8023 | 2563 | guarantee_online_mems(task_cs(tsk), &mask); |
b8dadcb5 | 2564 | rcu_read_unlock(); |
8447a0fe | 2565 | spin_unlock_irqrestore(&callback_lock, flags); |
909d75a3 PJ |
2566 | |
2567 | return mask; | |
2568 | } | |
2569 | ||
d9fd8a6d | 2570 | /** |
19770b32 MG |
2571 | * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed |
2572 | * @nodemask: the nodemask to be checked | |
d9fd8a6d | 2573 | * |
19770b32 | 2574 | * Are any of the nodes in the nodemask allowed in current->mems_allowed? |
1da177e4 | 2575 | */ |
19770b32 | 2576 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) |
1da177e4 | 2577 | { |
19770b32 | 2578 | return nodes_intersects(*nodemask, current->mems_allowed); |
1da177e4 LT |
2579 | } |
2580 | ||
9bf2229f | 2581 | /* |
78608366 PM |
2582 | * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or |
2583 | * mem_hardwall ancestor to the specified cpuset. Call holding | |
8447a0fe | 2584 | * callback_lock. If no ancestor is mem_exclusive or mem_hardwall |
78608366 | 2585 | * (an unusual configuration), then returns the root cpuset. |
9bf2229f | 2586 | */ |
c9710d80 | 2587 | static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) |
9bf2229f | 2588 | { |
c431069f TH |
2589 | while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) |
2590 | cs = parent_cs(cs); | |
9bf2229f PJ |
2591 | return cs; |
2592 | } | |
2593 | ||
d9fd8a6d | 2594 | /** |
344736f2 | 2595 | * cpuset_node_allowed - Can we allocate on a memory node? |
a1bc5a4e | 2596 | * @node: is this an allowed node? |
02a0e53d | 2597 | * @gfp_mask: memory allocation flags |
d9fd8a6d | 2598 | * |
6e276d2a DR |
2599 | * If we're in interrupt, yes, we can always allocate. If @node is set in |
2600 | * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this | |
2601 | * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, | |
2602 | * yes. If current has access to memory reserves due to TIF_MEMDIE, yes. | |
9bf2229f PJ |
2603 | * Otherwise, no. |
2604 | * | |
2605 | * GFP_USER allocations are marked with the __GFP_HARDWALL bit, | |
c596d9f3 DR |
2606 | * and do not allow allocations outside the current tasks cpuset |
2607 | * unless the task has been OOM killed as is marked TIF_MEMDIE. | |
9bf2229f | 2608 | * GFP_KERNEL allocations are not so marked, so can escape to the |
78608366 | 2609 | * nearest enclosing hardwalled ancestor cpuset. |
9bf2229f | 2610 | * |
8447a0fe | 2611 | * Scanning up parent cpusets requires callback_lock. The |
02a0e53d PJ |
2612 | * __alloc_pages() routine only calls here with __GFP_HARDWALL bit |
2613 | * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the | |
2614 | * current tasks mems_allowed came up empty on the first pass over | |
2615 | * the zonelist. So only GFP_KERNEL allocations, if all nodes in the | |
8447a0fe | 2616 | * cpuset are short of memory, might require taking the callback_lock. |
9bf2229f | 2617 | * |
36be57ff | 2618 | * The first call here from mm/page_alloc:get_page_from_freelist() |
02a0e53d PJ |
2619 | * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, |
2620 | * so no allocation on a node outside the cpuset is allowed (unless | |
2621 | * in interrupt, of course). | |
36be57ff PJ |
2622 | * |
2623 | * The second pass through get_page_from_freelist() doesn't even call | |
2624 | * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() | |
2625 | * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set | |
2626 | * in alloc_flags. That logic and the checks below have the combined | |
2627 | * effect that: |
9bf2229f PJ |
2628 | * in_interrupt - any node ok (current task context irrelevant) |
2629 | * GFP_ATOMIC - any node ok | |
c596d9f3 | 2630 | * TIF_MEMDIE - any node ok |
78608366 | 2631 | * GFP_KERNEL - any node in enclosing hardwalled cpuset ok |
9bf2229f | 2632 | * GFP_USER - only nodes in current tasks mems allowed ok. |
02a0e53d | 2633 | */ |
344736f2 | 2634 | int __cpuset_node_allowed(int node, gfp_t gfp_mask) |
1da177e4 | 2635 | { |
c9710d80 | 2636 | struct cpuset *cs; /* current cpuset ancestors */ |
29afd49b | 2637 | int allowed; /* is allocation on this node allowed? */ |
8447a0fe | 2638 | unsigned long flags; |
9bf2229f | 2639 | |
6e276d2a | 2640 | if (in_interrupt()) |
9bf2229f | 2641 | return 1; |
9bf2229f PJ |
2642 | if (node_isset(node, current->mems_allowed)) |
2643 | return 1; | |
c596d9f3 DR |
2644 | /* |
2645 | * Allow tasks that have access to memory reserves because they have | |
2646 | * been OOM killed to get memory anywhere. | |
2647 | */ | |
2648 | if (unlikely(test_thread_flag(TIF_MEMDIE))) | |
2649 | return 1; | |
9bf2229f PJ |
2650 | if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ |
2651 | return 0; | |
2652 | ||
5563e770 BP |
2653 | if (current->flags & PF_EXITING) /* Let dying task have memory */ |
2654 | return 1; | |
2655 | ||
9bf2229f | 2656 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
8447a0fe | 2657 | spin_lock_irqsave(&callback_lock, flags); |
053199ed | 2658 | |
b8dadcb5 | 2659 | rcu_read_lock(); |
78608366 | 2660 | cs = nearest_hardwall_ancestor(task_cs(current)); |
99afb0fd | 2661 | allowed = node_isset(node, cs->mems_allowed); |
b8dadcb5 | 2662 | rcu_read_unlock(); |
053199ed | 2663 | |
8447a0fe | 2664 | spin_unlock_irqrestore(&callback_lock, flags); |
9bf2229f | 2665 | return allowed; |
1da177e4 LT |
2666 | } |
2667 | ||
825a46af | 2668 | /** |
6adef3eb JS |
2669 | * cpuset_mem_spread_node() - On which node to begin search for a file page |
2670 | * cpuset_slab_spread_node() - On which node to begin search for a slab page | |
825a46af PJ |
2671 | * |
2672 | * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for | |
2673 | * tasks in a cpuset with is_spread_page or is_spread_slab set), | |
2674 | * and if the memory allocation used cpuset_mem_spread_node() | |
2675 | * to determine on which node to start looking, as it will for | |
2676 | * certain page cache or slab cache pages such as used for file | |
2677 | * system buffers and inode caches, then instead of starting on the | |
2678 | * local node to look for a free page, rather spread the starting | |
2679 | * node around the tasks mems_allowed nodes. | |
2680 | * | |
2681 | * We don't have to worry about the returned node being offline | |
2682 | * because "it can't happen", and even if it did, it would be ok. | |
2683 | * | |
2684 | * The routines calling guarantee_online_mems() are careful to | |
2685 | * only set nodes in task->mems_allowed that are online. So it | |
2686 | * should not be possible for the following code to return an | |
2687 | * offline node. But if it did, that would be ok, as this routine | |
2688 | * is not returning the node where the allocation must be, only | |
2689 | * the node where the search should start. The zonelist passed to | |
2690 | * __alloc_pages() will include all nodes. If the slab allocator | |
2691 | * is passed an offline node, it will fall back to the local node. | |
2692 | * See kmem_cache_alloc_node(). | |
2693 | */ | |
2694 | ||
6adef3eb | 2695 | static int cpuset_spread_node(int *rotor) |
825a46af PJ |
2696 | { |
2697 | int node; | |
2698 | ||
6adef3eb | 2699 | node = next_node(*rotor, current->mems_allowed); |
825a46af PJ |
2700 | if (node == MAX_NUMNODES) |
2701 | node = first_node(current->mems_allowed); | |
6adef3eb | 2702 | *rotor = node; |
825a46af PJ |
2703 | return node; |
2704 | } | |
6adef3eb JS |
2705 | |
2706 | int cpuset_mem_spread_node(void) | |
2707 | { | |
778d3b0f MH |
2708 | if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) |
2709 | current->cpuset_mem_spread_rotor = | |
2710 | node_random(¤t->mems_allowed); | |
2711 | ||
6adef3eb JS |
2712 | return cpuset_spread_node(¤t->cpuset_mem_spread_rotor); |
2713 | } | |
2714 | ||
2715 | int cpuset_slab_spread_node(void) | |
2716 | { | |
778d3b0f MH |
2717 | if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) |
2718 | current->cpuset_slab_spread_rotor = | |
2719 | node_random(¤t->mems_allowed); | |
2720 | ||
6adef3eb JS |
2721 | return cpuset_spread_node(¤t->cpuset_slab_spread_rotor); |
2722 | } | |
2723 | ||
825a46af PJ |
2724 | EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); |
2725 | ||
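As a usage illustration, a page-cache or inode-cache allocation site would typically consult the rotor only when the task has memory spreading enabled. A minimal sketch, assuming a cpuset_do_page_mem_spread() helper that tests PF_SPREAD_PAGE (the helper name is an assumption here, not defined in this file):

/*
 * Sketch: start the page search on the spread rotor node when the
 * task's cpuset asks for page spreading, otherwise allocate locally.
 */
static struct page *example_page_cache_alloc(gfp_t gfp)
{
	if (cpuset_do_page_mem_spread()) {
		int nid = cpuset_mem_spread_node();

		return alloc_pages_node(nid, gfp, 0);
	}
	return alloc_pages(gfp, 0);
}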
ef08e3b4 | 2726 | /** |
bbe373f2 DR |
2727 | * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? |
2728 | * @tsk1: pointer to task_struct of some task. | |
2729 | * @tsk2: pointer to task_struct of some other task. | |
2730 | * | |
2731 | * Description: Return true if @tsk1's mems_allowed intersects the | |
2732 | * mems_allowed of @tsk2. Used by the OOM killer to determine if | |
2733 | * one task's memory usage might impact the memory available | |
2734 | * to the other. | |
ef08e3b4 PJ |
2735 | **/ |
2736 | ||
bbe373f2 DR |
2737 | int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
2738 | const struct task_struct *tsk2) | |
ef08e3b4 | 2739 | { |
bbe373f2 | 2740 | return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); |
ef08e3b4 PJ |
2741 | } |
2742 | ||
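To see why the intersection test is the right primitive for the OOM killer, consider a sketch of an eligibility check (the real selection logic lives in mm/oom_kill.c and is more involved; the helper below is illustrative only): a candidate whose allowed nodes are disjoint from current's cannot free memory that current can use, so killing it would not relieve the pressure.

static bool example_oom_victim_useful(struct task_struct *candidate)
{
	/* Disjoint mems_allowed: the victim's memory would not help us. */
	return cpuset_mems_allowed_intersects(current, candidate);
}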
75aa1994 | 2743 | /** |
da39da3a | 2744 | * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed |
75aa1994 | 2745 | * |
da39da3a | 2746 | * Description: Prints current's name, cpuset name, and a cached copy of its
b8dadcb5 | 2747 | * mems_allowed to the kernel log. |
75aa1994 | 2748 | */ |
da39da3a | 2749 | void cpuset_print_current_mems_allowed(void) |
75aa1994 | 2750 | { |
b8dadcb5 | 2751 | struct cgroup *cgrp; |
75aa1994 | 2752 | |
b8dadcb5 | 2753 | rcu_read_lock(); |
63f43f55 | 2754 | |
da39da3a DR |
2755 | cgrp = task_cs(current)->css.cgroup; |
2756 | pr_info("%s cpuset=", current->comm); | |
e61734c5 | 2757 | pr_cont_cgroup_name(cgrp); |
da39da3a DR |
2758 | pr_cont(" mems_allowed=%*pbl\n", |
2759 | nodemask_pr_args(¤t->mems_allowed)); | |
f440d98f | 2760 | |
cfb5966b | 2761 | rcu_read_unlock(); |
75aa1994 DR |
2762 | } |
2763 | ||
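For illustration, with the format strings above, a task named "myapp" attached to a cpuset whose cgroup is named "batch" and allowed nodes 0-3 would emit a line roughly of the form (exact name rendering depends on pr_cont_cgroup_name()):

	myapp cpuset=batch mems_allowed=0-3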
3e0d98b9 PJ |
2764 | /* |
2765 | * Collection of memory_pressure is suppressed unless | |
2766 | * this flag is enabled by writing "1" to the special | |
2767 | * cpuset file 'memory_pressure_enabled' in the root cpuset. | |
2768 | */ | |
2769 | ||
c5b2aff8 | 2770 | int cpuset_memory_pressure_enabled __read_mostly; |
3e0d98b9 PJ |
2771 | |
2772 | /** | |
2773 | * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. | |
2774 | * | |
2775 | * Keep a running average of the rate of synchronous (direct) | |
2776 | * page reclaim efforts initiated by tasks in each cpuset. | |
2777 | * | |
2778 | * This represents the rate at which some task in the cpuset | |
2779 | * ran low on memory on all nodes it was allowed to use, and | |
2780 | * had to enter the kernel's page reclaim code in an effort to | |
2781 | * create more free memory by tossing clean pages or swapping | |
2782 | * or writing dirty pages. | |
2783 | * | |
2784 | * Displayed to user space in the per-cpuset read-only file | |
2785 | * "memory_pressure". Value displayed is an integer | |
2786 | * representing the recent rate of entry into the synchronous | |
2787 | * (direct) page reclaim by any task attached to the cpuset. | |
2788 | **/ | |
2789 | ||
2790 | void __cpuset_memory_pressure_bump(void) | |
2791 | { | |
b8dadcb5 | 2792 | rcu_read_lock(); |
8793d854 | 2793 | fmeter_markevent(&task_cs(current)->fmeter); |
b8dadcb5 | 2794 | rcu_read_unlock(); |
3e0d98b9 PJ |
2795 | } |
2796 | ||
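The leading double underscore follows the usual convention that a header-side wrapper performs the cheap enable check before calling in from the direct reclaim path. A minimal sketch of such a guard, assuming it lives next to the cpuset_memory_pressure_enabled declaration (the macro name here is illustrative):

/*
 * Sketch: only account the reclaim event when the root cpuset's
 * memory_pressure_enabled flag has been switched on.
 */
#define cpuset_memory_pressure_bump_sketch()			\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)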
8793d854 | 2797 | #ifdef CONFIG_PROC_PID_CPUSET |
1da177e4 LT |
2798 | /* |
2799 | * proc_cpuset_show() | |
2800 | * - Print task's cpuset path into seq_file. | |
2801 | * - Used for /proc/<pid>/cpuset. | |
053199ed PJ |
2802 | * - No need to task_lock(tsk) on this tsk->cpuset reference, as it |
2803 | * doesn't really matter if tsk->cpuset changes after we read it, | |
5d21cc2d | 2804 | * and we take cpuset_mutex, keeping cpuset_attach() from changing it |
2df167a3 | 2805 | * anyway. |
1da177e4 | 2806 | */ |
52de4779 ZL |
2807 | int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, |
2808 | struct pid *pid, struct task_struct *tsk) | |
1da177e4 | 2809 | { |
e61734c5 | 2810 | char *buf, *p; |
8793d854 | 2811 | struct cgroup_subsys_state *css; |
99f89551 | 2812 | int retval; |
1da177e4 | 2813 | |
99f89551 | 2814 | retval = -ENOMEM; |
e61734c5 | 2815 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
1da177e4 | 2816 | if (!buf) |
99f89551 EB |
2817 | goto out; |
2818 | ||
e61734c5 | 2819 | retval = -ENAMETOOLONG; |
27e89ae5 | 2820 | rcu_read_lock(); |
073219e9 | 2821 | css = task_css(tsk, cpuset_cgrp_id); |
e61734c5 | 2822 | p = cgroup_path(css->cgroup, buf, PATH_MAX); |
27e89ae5 | 2823 | rcu_read_unlock(); |
e61734c5 | 2824 | if (!p) |
52de4779 | 2825 | goto out_free; |
e61734c5 | 2826 | seq_puts(m, p); |
1da177e4 | 2827 | seq_putc(m, '\n'); |
e61734c5 | 2828 | retval = 0; |
99f89551 | 2829 | out_free: |
1da177e4 | 2830 | kfree(buf); |
99f89551 | 2831 | out: |
1da177e4 LT |
2832 | return retval; |
2833 | } | |
8793d854 | 2834 | #endif /* CONFIG_PROC_PID_CPUSET */ |
1da177e4 | 2835 | |
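In practice, reading /proc/self/cpuset for a task in the root cpuset prints a single line containing "/", while a task attached to a nested cpuset prints its cgroup path, for example "/batch/low-prio" (the path shown here is illustrative).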
d01d4827 | 2836 | /* Display task mems_allowed in /proc/<pid>/status file. */ |
df5f8314 EB |
2837 | void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) |
2838 | { | |
e8e6d97c TH |
2839 | seq_printf(m, "Mems_allowed:\t%*pb\n", |
2840 | nodemask_pr_args(&task->mems_allowed)); | |
2841 | seq_printf(m, "Mems_allowed_list:\t%*pbl\n", | |
2842 | nodemask_pr_args(&task->mems_allowed)); | |
1da177e4 | 2843 | } |
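Given the two format strings above, the corresponding /proc/<pid>/status fields for a task allowed nodes 0-1 would look roughly like the following (the width of the hex mask depends on MAX_NUMNODES):

	Mems_allowed:	00000000,00000003
	Mems_allowed_list:	0-1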