extern int number_of_cpusets; /* How many cpusets are defined in system? */
+extern int cpuset_init_early(void);
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_fork(struct task_struct *p);
#else /* !CONFIG_CPUSETS */
/* No-op stubs when CONFIG_CPUSETS is disabled: init and fork hooks
 * compile away so callers need no #ifdef at the call site. */
+static inline int cpuset_init_early(void) { return 0; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
static inline void cpuset_fork(struct task_struct *p) {}
* Do not call this routine if in_interrupt().
*
* Call without callback_sem or task_lock() held. May be called
- * with or without manage_sem held. Except in early boot or
- * an exiting task, when tsk->cpuset is NULL, this routine will
- * acquire task_lock(). We don't need to use task_lock to guard
+ * with or without manage_sem held. Doesn't need task_lock to guard
* against another task changing a non-NULL cpuset pointer to NULL,
* as that is only done by a task on itself, and if the current task
* is here, it is not simultaneously in the exit code NULL'ing its
struct task_struct *tsk = current;
struct cpuset *cs = tsk->cpuset;
- if (unlikely(!cs))
- return;
-
task_lock(tsk);
my_cpusets_mem_gen = cs->mems_generation;
task_unlock(tsk);
return 0;
}
+/*
+ * cpuset_init_early - just enough so that the calls to
+ * cpuset_update_task_memory_state() in early init code
+ * are harmless.
+ */
+
+int __init cpuset_init_early(void)
+{
+ struct task_struct *tsk = current;
+
+ /* Give the boot task a valid cpuset pointer (the static top_cpuset)
+  * and sync its mems_generation, so the NULL-pointer check removed
+  * from the memory-state update path is never needed this early. */
+ tsk->cpuset = &top_cpuset;
+ tsk->cpuset->mems_generation = atomic_read(&cpuset_mems_generation);
+ return 0;
+}
+
/**
* cpuset_init - initialize cpusets at system boot
*