From: Ingo Molnar
Date: Wed, 3 Dec 2008 07:54:47 +0000 (+0100)
Subject: Merge commit 'v2.6.28-rc7'; branch 'x86/dumpstack' into tracing/ftrace
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=dfdc5437bd62dd6a26961e27f26b671374749875;p=GitHub%2Fexynos8895%2Fandroid_kernel_samsung_universal8895.git

Merge commit 'v2.6.28-rc7'; branch 'x86/dumpstack' into tracing/ftrace

Merge x86/dumpstack into tracing/ftrace because upcoming ftrace changes
depend on cleanups already in x86/dumpstack.

Also merge to latest upstream -rc.
---

dfdc5437bd62dd6a26961e27f26b671374749875
diff --cc arch/x86/kernel/Makefile
index d274425fb076,b62a7667828e,db3216a9d2b9..a3049da61985
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@@@ -11,15 -11,9 -11,8 +11,15 @@@@ ifdef CONFIG_FUNCTION_TRACE
   CFLAGS_REMOVE_tsc.o = -pg
   CFLAGS_REMOVE_rtc.o = -pg
   CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
  +CFLAGS_REMOVE_ftrace.o = -pg
  +endif
  +
 ++ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ++# Don't trace __switch_to() but let it for function tracer
 ++CFLAGS_REMOVE_process_32.o = -pg
 ++CFLAGS_REMOVE_process_64.o = -pg
 + endif
 +
   #
   # vsyscalls (which work on the user stack) should have
   # no stack-protector checks:
diff --cc include/linux/sched.h
index 7ad48f2a2758,55e30d114477,5c38db536e07..2d0a93c31228
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@@@ -1356,26 -1347,15 -1345,7 +1360,26 @@@@ struct task_struct
   #ifdef CONFIG_LATENCYTOP
   	int latency_record_count;
   	struct latency_record latency_record[LT_SAVECOUNT];
  +#endif
  +	/*
  +	 * time slack values; these are used to round up poll() and
  +	 * select() etc timeout values. These are in nanoseconds.
  +	 */
  +	unsigned long timer_slack_ns;
  +	unsigned long default_timer_slack_ns;
  +
  +	struct list_head *scm_work_list;
 ++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ++	/* Index of current stored adress in ret_stack */
 ++	int curr_ret_stack;
 ++	/* Stack of return addresses for return function tracing */
 ++	struct ftrace_ret_stack	*ret_stack;
 ++	/*
 ++	 * Number of functions that haven't been traced
 ++	 * because of depth overrun.
 ++	 */
 ++	atomic_t trace_overrun;
 + #endif
   };

   /*
diff --cc kernel/profile.c
index 7f93a5042d3b,dc41827fbfee,a9e422df6bf6..60adefb59b5e
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@@@ -544,7 -544,7 -544,7 +544,7 @@@@ static const struct file_operations pro
   };

   #ifdef CONFIG_SMP
 - static inline void profile_nop(void *unused)
  -static void __init profile_nop(void *unused)
 ++static void profile_nop(void *unused)
   {
   }
diff --cc kernel/sched.c
index 52490bf6b884,b7480fb5c3dc,d906f72b42d2..7729c4bbc8ba
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@@@ -1459,11 -1453,12 -1439,9 +1459,12 @@@@ static int task_hot(struct task_struct
   static unsigned long cpu_avg_load_per_task(int cpu)
   {
   	struct rq *rq = cpu_rq(cpu);
+ +	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

- -	if (rq->nr_running)
- -		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+ +	if (nr_running)
+ +		rq->avg_load_per_task = rq->load.weight / nr_running;
  +	else
  +		rq->avg_load_per_task = 0;

   	return rq->avg_load_per_task;
   }
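
For context on the kernel/sched.c hunk above: the merged code reads rq->nr_running exactly once via ACCESS_ONCE() and then both tests and divides by that snapshot, so a concurrent update to nr_running cannot turn the check into a divide by zero, and an empty runqueue now resets avg_load_per_task to 0 instead of returning a stale value. The stand-alone C sketch below is illustrative only and is not code from this commit: the fake_rq/fake_avg_load_per_task names are hypothetical, and a plain volatile cast stands in for the kernel's ACCESS_ONCE().

/*
 * Illustrative sketch only -- not taken from this commit. It models the
 * read-once pattern used by cpu_avg_load_per_task() above: snapshot a
 * field that another context may change, then test and divide the
 * snapshot rather than re-reading the live field.
 */
#include <stdio.h>

/* Stand-in for the kernel's ACCESS_ONCE(): force exactly one read. */
#define READ_FIELD_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct fake_rq {
	unsigned long nr_running;        /* may change concurrently   */
	unsigned long load_weight;       /* models rq->load.weight    */
	unsigned long avg_load_per_task;
};

static unsigned long fake_avg_load_per_task(struct fake_rq *rq)
{
	/* One snapshot; the same value is both checked and divided by. */
	unsigned long nr_running = READ_FIELD_ONCE(rq->nr_running);

	if (nr_running)
		rq->avg_load_per_task = rq->load_weight / nr_running;
	else
		rq->avg_load_per_task = 0;   /* empty runqueue: no stale average */

	return rq->avg_load_per_task;
}

int main(void)
{
	struct fake_rq rq = { .nr_running = 3, .load_weight = 3072 };

	printf("%lu\n", fake_avg_load_per_task(&rq));   /* prints 1024 */

	rq.nr_running = 0;
	printf("%lu\n", fake_avg_load_per_task(&rq));   /* prints 0 */
	return 0;
}

The sketch only demonstrates why the single snapshot matters for the check-then-divide sequence; the real scheduler code additionally relies on ACCESS_ONCE() semantics and runqueue locking rules that this user-space example does not model.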