From: Anton Blanchard
Date: Mon, 27 Mar 2006 01:00:45 +0000 (+1100)
Subject: [PATCH] powerpc: Remove oprofile spinlock backtrace code
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=15e812ad849e142e3dfc984d33c4d8042389f148;p=GitHub%2Fmt8127%2Fandroid_kernel_alcatel_ttab.git

[PATCH] powerpc: Remove oprofile spinlock backtrace code

Remove the oprofile spinlock backtrace code now that we have proper
calltrace support. Also make the MMCRA SIHV and SIPR bits variables,
since they may change in future CPUs. Finally, MMCRA should be a
64-bit quantity.

Signed-off-by: Anton Blanchard
Signed-off-by: Paul Mackerras
---

diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 2b9143b0f6ba..5b1de7e8041e 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -117,17 +117,10 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
 
 	oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
 	oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
-#ifdef CONFIG_PPC64
-	oprofilefs_create_ulong(sb, root, "backtrace_spinlocks",
-				&sys.backtrace_spinlocks);
-#endif
 
 	/* Default to tracing both kernel and user */
 	sys.enable_kernel = 1;
 	sys.enable_user = 1;
-#ifdef CONFIG_PPC64
-	sys.backtrace_spinlocks = 0;
-#endif
 
 	return 0;
 }
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 38db2efef3bc..4c2beab1fdc1 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -25,18 +25,14 @@ static unsigned long reset_value[OP_MAX_COUNTER];
 
 static int oprofile_running;
 static int mmcra_has_sihv;
+/* Unfortunately these bits vary between CPUs */
+static unsigned long mmcra_sihv = MMCRA_SIHV;
+static unsigned long mmcra_sipr = MMCRA_SIPR;
 
 /* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
 static u32 mmcr0_val;
 static u64 mmcr1_val;
-static u32 mmcra_val;
-
-/*
- * Since we do not have an NMI, backtracing through spinlocks is
- * only a best guess. In light of this, allow it to be disabled at
- * runtime.
- */
-static int backtrace_spinlocks;
+static u64 mmcra_val;
 
 static void power4_reg_setup(struct op_counter_config *ctr,
 			     struct op_system_config *sys,
@@ -63,8 +59,6 @@ static void power4_reg_setup(struct op_counter_config *ctr,
 
 	mmcr1_val = sys->mmcr1;
 	mmcra_val = sys->mmcra;
-	backtrace_spinlocks = sys->backtrace_spinlocks;
-
 	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
 		reset_value[i] = 0x80000000UL - ctr[i].count;
 
@@ -197,25 +191,6 @@ static void __attribute_used__ kernel_unknown_bucket(void)
 {
 }
 
-static unsigned long check_spinlock_pc(struct pt_regs *regs,
-				       unsigned long profile_pc)
-{
-	unsigned long pc = instruction_pointer(regs);
-
-	/*
-	 * If both the SIAR (sampled instruction) and the perfmon exception
-	 * occurred in a spinlock region then we account the sample to the
-	 * calling function. This isnt 100% correct, we really need soft
-	 * IRQ disable so we always get the perfmon exception at the
-	 * point at which the SIAR is set.
-	 */
-	if (backtrace_spinlocks && in_lock_functions(pc) &&
-	    in_lock_functions(profile_pc))
-		return regs->link;
-	else
-		return profile_pc;
-}
-
 /*
  * On GQ and newer the MMCRA stores the HV and PR bits at the time
  * the SIAR was sampled. We use that to work out if the SIAR was sampled in
@@ -228,17 +203,17 @@ static unsigned long get_pc(struct pt_regs *regs)
 
 	/* Cant do much about it */
 	if (!mmcra_has_sihv)
-		return check_spinlock_pc(regs, pc);
+		return pc;
 
 	mmcra = mfspr(SPRN_MMCRA);
 
 	/* Were we in the hypervisor? */
-	if (firmware_has_feature(FW_FEATURE_LPAR) && (mmcra & MMCRA_SIHV))
+	if (firmware_has_feature(FW_FEATURE_LPAR) && (mmcra & mmcra_sihv))
 		/* function descriptor madness */
 		return *((unsigned long *)hypervisor_bucket);
 
 	/* We were in userspace, nothing to do */
-	if (mmcra & MMCRA_SIPR)
+	if (mmcra & mmcra_sipr)
 		return pc;
 
 #ifdef CONFIG_PPC_RTAS
@@ -257,7 +232,7 @@ static unsigned long get_pc(struct pt_regs *regs)
 		/* function descriptor madness */
 		return *((unsigned long *)kernel_unknown_bucket);
 
-	return check_spinlock_pc(regs, pc);
+	return pc;
 }
 
 static int get_kernel(unsigned long pc)
@@ -268,7 +243,7 @@ static int get_kernel(unsigned long pc)
 		is_kernel = is_kernel_addr(pc);
 	} else {
 		unsigned long mmcra = mfspr(SPRN_MMCRA);
-		is_kernel = ((mmcra & MMCRA_SIPR) == 0);
+		is_kernel = ((mmcra & mmcra_sipr) == 0);
 	}
 
 	return is_kernel;
diff --git a/include/asm-powerpc/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h
index aa180e907a67..5b33994cd488 100644
--- a/include/asm-powerpc/oprofile_impl.h
+++ b/include/asm-powerpc/oprofile_impl.h
@@ -35,9 +35,6 @@ struct op_system_config {
 #endif
 	unsigned long enable_kernel;
 	unsigned long enable_user;
-#ifdef CONFIG_PPC64
-	unsigned long backtrace_spinlocks;
-#endif
 };
 
 /* Per-arch configuration */
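
Below is a minimal, standalone sketch (illustration only; not part of the patch and not kernel code) of why the SIHV/SIPR tests above now go through the mmcra_sihv/mmcra_sipr variables rather than the fixed macros: a CPU-specific setup routine can repoint the masks for a core that places the bits elsewhere while the sampling path stays unchanged. The mask values and the cpu_setup_newer_core() helper are invented for the example, and the real get_pc() additionally gates the SIHV test on firmware_has_feature(FW_FEATURE_LPAR).

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions standing in for the POWER4-era macros. */
#define MMCRA_SIHV	0x10000000UL	/* sampled in hypervisor state */
#define MMCRA_SIPR	0x08000000UL	/* sampled in problem (user) state */

/* Runtime copies, as in the patch: defaults match the macros. */
static unsigned long mmcra_sihv = MMCRA_SIHV;
static unsigned long mmcra_sipr = MMCRA_SIPR;

/* A hypothetical later core that moved the bits only needs a new setup hook. */
static void cpu_setup_newer_core(void)
{
	mmcra_sihv = 0x04000000UL;	/* made-up replacement positions */
	mmcra_sipr = 0x02000000UL;
}

/* Mirrors the classification done by get_pc()/get_kernel() in the patch. */
static const char *classify_sample(uint64_t mmcra)
{
	if (mmcra & mmcra_sihv)
		return "hypervisor";
	if (mmcra & mmcra_sipr)
		return "user";
	return "kernel";
}

int main(void)
{
	uint64_t sample = 0x08000000UL;	/* pretend the SIPR bit was set when sampled */

	printf("default masks:   %s\n", classify_sample(sample));

	cpu_setup_newer_core();
	/* The same sample value now classifies as "kernel" under the moved bits. */
	printf("relocated masks: %s\n", classify_sample(sample));
	return 0;
}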