return total;
}
+void __weak arch_release_task_struct(struct task_struct *tsk)
+{
+}
+
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;
return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}
-void __weak arch_release_task_struct(struct task_struct *tsk) { }
-
static inline void free_task_struct(struct task_struct *tsk)
{
- arch_release_task_struct(tsk);
kmem_cache_free(task_struct_cachep, tsk);
}
#endif
+void __weak arch_release_thread_info(struct thread_info *ti)
+{
+}
+
#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
-void __weak arch_release_thread_info(struct thread_info *ti) { }
/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
static inline void free_thread_info(struct thread_info *ti)
{
- arch_release_thread_info(ti);
free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
# else
static void free_thread_info(struct thread_info *ti)
{
- arch_release_thread_info(ti);
kmem_cache_free(thread_info_cache, ti);
}
void free_task(struct task_struct *tsk)
{
account_kernel_stack(tsk->stack, -1);
+ arch_release_thread_info(tsk->stack);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
put_seccomp_filter(tsk);
+ arch_release_task_struct(tsk);
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
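
The hooks hoisted above rely on the kernel's __weak annotation: the empty definitions in kernel/fork.c are weak symbols, so an architecture that provides its own arch_release_task_struct() or arch_release_thread_info() wins at link time and the generic no-op is dropped. A minimal userspace sketch of that mechanism, assuming GCC/Clang (whose __attribute__((weak)) is what the kernel's __weak macro expands to); release_hook() is a made-up stand-in, not a kernel function:

#include <stdio.h>

/* Weak default: a no-op, like the __weak arch_release_*() stubs above. */
__attribute__((weak)) void release_hook(void)
{
}

/*
 * A strong definition elsewhere (the "architecture override") would be
 * preferred by the linker over the weak no-op:
 *
 *	void release_hook(void)
 *	{
 *		printf("arch-specific release\n");
 *	}
 */

int main(void)
{
	release_hook();	/* no-op unless a strong release_hook() is linked in */
	return 0;
}
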
return NULL;
ti = alloc_thread_info_node(tsk, node);
- if (!ti) {
- free_task_struct(tsk);
- return NULL;
- }
+ if (!ti)
+ goto free_tsk;
err = arch_dup_task_struct(tsk, orig);
if (err)
- goto out;
+ goto free_ti;
tsk->stack = ti;
return tsk;
-out:
+free_ti:
free_thread_info(ti);
+free_tsk:
free_task_struct(tsk);
return NULL;
}
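
The dup_task_struct() hunk above swaps the early free-and-return for the kernel's usual goto-based unwind: each failure jumps to a label that frees only what was allocated before it, in reverse order, so further allocations can be added without duplicating cleanup code. A self-contained sketch of the same pattern, using plain malloc/free and a hypothetical make_pair() purely for illustration:

#include <stdlib.h>

struct pair {
	void *a;
	void *b;
};

/* Allocate two buffers; on failure, unwind in reverse allocation order. */
static struct pair *make_pair(void)
{
	struct pair *p = malloc(sizeof(*p));

	if (!p)
		return NULL;

	p->a = malloc(64);
	if (!p->a)
		goto free_p;	/* only p exists so far */

	p->b = malloc(64);
	if (!p->b)
		goto free_a;	/* free a, then p */

	return p;

free_a:
	free(p->a);
free_p:
	free(p);
	return NULL;
}

int main(void)
{
	struct pair *p = make_pair();

	if (p) {
		free(p->a);
		free(p->b);
		free(p);
	}
	return 0;
}
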