config ARCH_THREAD_INFO_ALLOCATOR
bool
+# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
+config ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ bool
+
config HAVE_REGS_AND_STACK_ACCESS_API
bool
help
select ARCH_USE_CMPXCHG_LOCKREF if X86_64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION if X86_32
select ARCH_WANT_OPTIONAL_GPIOLIB
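
Any architecture that selects ARCH_WANTS_DYNAMIC_TASK_STRUCT takes on the obligation to assign arch_task_struct_size before fork_init() creates the task_struct slab cache. A minimal sketch of that contract, with a hypothetical arch setup hook and extra-state size:

/*
 * Sketch only: 'arch_example_setup' and 'arch_extra_state_size' are
 * hypothetical. On x86 the assignment happens in
 * fpu__init_task_struct_size(), further below.
 */
void __init arch_example_setup(void)
{
	arch_task_struct_size = sizeof(struct task_struct) +
				arch_extra_state_size;
}
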
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
+#include <linux/sched.h>
+
/*
* Initialize the TS bit in CR0 according to the style of context-switches
* we are using:
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);
-#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
- BUILD_BUG_ON((sizeof(TYPE) - \
- offsetof(TYPE, MEMBER) - \
- sizeof(((TYPE *)0)->MEMBER)) > \
- 0) \
+/* Enforce that 'MEMBER' is the last field of 'TYPE': */
+#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
+ BUILD_BUG_ON(sizeof(TYPE) != offsetofend(TYPE, MEMBER))
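
The rewritten macro relies on the kernel's offsetofend() helper, which expands to the member's offsetof() plus its size; if that sum equals sizeof(TYPE), the member is provably the last byte of the type. A self-contained userspace rendering of the same compile-time check:

#include <stddef.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct example {
	int  a;
	long tail;	/* must stay the last member */
};

/* Fails to compile if a field is added after 'tail' or if trailing
 * padding appears, just like the BUILD_BUG_ON() above: */
_Static_assert(sizeof(struct example) == offsetofend(struct example, tail),
	       "'tail' is not at the end of struct example");

int main(void) { return 0; }
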
/*
- * We append the 'struct fpu' to the task_struct.
+ * We append the 'struct fpu' to the task_struct:
*/
-int __weak arch_task_struct_size(void)
+static void __init fpu__init_task_struct_size(void)
{
int task_size = sizeof(struct task_struct);
CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
- return task_size;
+ arch_task_struct_size = task_size;
}
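
The two CHECK lines are what make the scheme safe: 'fpu' must be the last field of thread_struct and 'thread' the last field of task_struct, so the bytes allocated past sizeof(struct task_struct) extend the FPU state area in place. A userspace sketch of that layout trick, with hypothetical names and sizing arithmetic along the same lines:

#include <stdlib.h>

struct fpu_sk    { unsigned char state[8]; };		  /* really variable-size */
struct thread_sk { long flags; struct fpu_sk fpu; };	  /* fpu last */
struct task_sk   { long pid; struct thread_sk thread; }; /* thread last */

/* Drop the placeholder state, add the dynamically determined size: */
struct task_sk *alloc_task(size_t xstate_size)
{
	return calloc(1, sizeof(struct task_sk)
			 - sizeof(((struct task_sk *)0)->thread.fpu.state)
			 + xstate_size);
}
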
/*
fpu__init_system_generic();
fpu__init_system_xstate_size_legacy();
fpu__init_system_xstate();
+ fpu__init_task_struct_size();
fpu__init_system_ctx_switch();
}
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- memcpy(dst, src, arch_task_struct_size());
+ memcpy(dst, src, arch_task_struct_size);
return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
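
The fork path is why the dynamic size has to be visible outside arch code: copying only sizeof(struct task_struct) bytes here would silently drop the extended register state stored past the struct's static size. Illustrative contrast only:

memcpy(dst, src, sizeof(struct task_struct));	/* would truncate the tail */
memcpy(dst, src, arch_task_struct_size);	/* copies the full object */
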
roundup(sizeof(CORE_STR), 4)) +
roundup(sizeof(struct elf_prstatus), 4) +
roundup(sizeof(struct elf_prpsinfo), 4) +
- roundup(arch_task_struct_size(), 4);
+ roundup(arch_task_struct_size, 4);
*elf_buflen = PAGE_ALIGN(*elf_buflen);
return size + *elf_buflen;
}
/* set up the task structure */
notes[2].name = CORE_STR;
notes[2].type = NT_TASKSTRUCT;
- notes[2].datasz = arch_task_struct_size();
+ notes[2].datasz = arch_task_struct_size;
notes[2].data = current;
nhdr->p_filesz += notesize(&notes[2]);
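
The /proc/kcore change is the same substitution twice: the NT_TASKSTRUCT note embeds the current task_struct wholesale, so both the buffer sizing above and the note's datasz must follow the dynamic size. A sketch of how datasz feeds the note's on-disk footprint, assuming the usual 4-byte rounding of name and payload (helper name hypothetical):

/* struct elf_note is the standard ELF note header (namesz/descsz/type): */
static int note_footprint(const char *name, unsigned int datasz)
{
	return sizeof(struct elf_note)
		+ roundup(strlen(name) + 1, 4)	/* name + NUL, padded */
		+ roundup(datasz, 4);		/* payload, now dynamic */
}
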
*/
};
-extern int arch_task_struct_size(void);
+#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+extern int arch_task_struct_size __read_mostly;
+#else
+# define arch_task_struct_size (sizeof(struct task_struct))
+#endif
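
The #else branch keeps the cost at zero for everyone else: without the config option, arch_task_struct_size degenerates to a compile-time constant, and call sites are written once for both variants. An illustrative caller (not from this patch):

/* Expands to kmalloc(sizeof(struct task_struct), GFP_KERNEL) on
 * architectures that do not select the option: */
void *buf = kmalloc(arch_task_struct_size, GFP_KERNEL);
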
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
-int __weak arch_task_struct_size(void)
-{
- return sizeof(struct task_struct);
-}
+#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+/* Initialized by the architecture: */
+int arch_task_struct_size __read_mostly;
+#endif
void __init fork_init(void)
{
- int task_struct_size = arch_task_struct_size();
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
#endif
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
- kmem_cache_create("task_struct", task_struct_size,
+ kmem_cache_create("task_struct", arch_task_struct_size,
ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif
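
With the cache created at arch_task_struct_size, every object the slab hands out already includes the architecture's extra tail, so the allocation helper itself needs no change. A sketch of that unchanged allocation side:

/* Object size is a property of the cache; callers stay oblivious
 * to the dynamic sizing. */
static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}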