/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#endif
/*
 * Store another value in a callchain_entry.
 */
static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        unsigned int nr = entry->nr;

        if (nr < PERF_MAX_STACK_DEPTH) {
                entry->ip[nr] = ip;
                entry->nr = nr + 1;
        }
}
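/*
 * Illustrative only (not part of the original file): the walkers below
 * interleave context markers with instruction addresses, so a sample
 * taken in the kernel on behalf of a user process ends up looking like:
 *
 *      entry->ip[] = { PERF_CONTEXT_KERNEL, nip, caller_1, ...,
 *                      PERF_CONTEXT_USER, user_nip, user_caller_1, ... }
 *
 * The markers let userspace tools split the chain into its kernel and
 * user portions.
 */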
/*
 * Is sp valid as the address of the next kernel stack frame after prev_sp?
 * The next frame may be in a different stack area but should not go
 * back down in the same stack area.
 */
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
        if (sp & 0xf)
                return 0;               /* must be 16-byte aligned */
        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return 0;
        if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
                return 1;
        /*
         * sp could decrease when we jump off an interrupt stack
         * back to the regular process stack.
         */
        if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
                return 1;
        return 0;
}
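/*
 * Illustrative note (not part of the original file): the THREAD_SIZE
 * masking above checks whether two stack pointers fall in the same
 * THREAD_SIZE-aligned stack region.  For example, with a 16 KB
 * THREAD_SIZE, sp & ~0x3fff identifies the region, so a jump from the
 * interrupt stack down to the task stack compares as a different region
 * and is accepted even though sp decreased.
 */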
static void perf_callchain_kernel(struct pt_regs *regs,
                                  struct perf_callchain_entry *entry)
{
        unsigned long sp, next_sp;
        unsigned long next_ip;
        unsigned long lr;
        long level = 0;
        unsigned long *fp;

        lr = regs->link;
        sp = regs->gpr[1];
        callchain_store(entry, PERF_CONTEXT_KERNEL);
        callchain_store(entry, regs->nip);

        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return;

        for (;;) {
                fp = (unsigned long *) sp;
                next_sp = fp[0];

                if (next_sp == sp + STACK_INT_FRAME_SIZE &&
                    fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        /*
                         * This looks like an interrupt frame for an
                         * interrupt that occurred in the kernel
                         */
                        regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
                        next_ip = regs->nip;
                        lr = regs->link;
                        level = 0;
                        callchain_store(entry, PERF_CONTEXT_KERNEL);

                } else {
                        if (level == 0)
                                next_ip = lr;
                        else
                                next_ip = fp[STACK_FRAME_LR_SAVE];

                        /*
                         * We can't tell which of the first two addresses
                         * we get are valid, but we can filter out the
                         * obviously bogus ones here.  We replace them
                         * with 0 rather than removing them entirely so
                         * that userspace can tell which is which.
                         */
                        if ((level == 1 && next_ip == lr) ||
                            (level <= 1 && !kernel_text_address(next_ip)))
                                next_ip = 0;

                        ++level;
                }

                callchain_store(entry, next_ip);
                if (!valid_next_sp(next_sp, sp))
                        return;
                sp = next_sp;
        }
}
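/*
 * Illustrative only (not part of the original file): the walker above
 * depends on the powerpc ABI stack frame layout.  Each frame starts
 * with a back-chain word pointing at the caller's frame, with the LR
 * save word a fixed distance above it (16 bytes on 64-bit, hence fp[2];
 * 4 bytes on 32-bit, hence fp[1]):
 *
 *      higher addresses
 *      +---------------------------+
 *      |  caller's frame           |
 *      +---------------------------+ <- next_sp = fp[0]
 *      |  ...                      |
 *      |  LR save word             | <- fp[STACK_FRAME_LR_SAVE]
 *      |  back-chain word          | <- sp (r1)
 *      +---------------------------+
 *      lower addresses
 *
 * The loop follows fp[0] to advance one frame and reads the saved LR
 * to recover each return address.
 */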
#ifdef CONFIG_PPC64
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
{
        pgd_t *pgdir;
        pte_t *ptep, pte;
        unsigned shift;
        unsigned long addr = (unsigned long) ptr;
        unsigned long offset;
        unsigned long pfn;
        void *kaddr;

        pgdir = current->mm->pgd;
        if (!pgdir)
                return -EFAULT;

        ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
        if (!shift)
                shift = PAGE_SHIFT;

        /* align address to page boundary */
        offset = addr & ((1UL << shift) - 1);
        addr -= offset;

        if (ptep == NULL)
                return -EFAULT;
        pte = *ptep;
        if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
                return -EFAULT;
        pfn = pte_pfn(pte);
        if (!page_is_ram(pfn))
                return -EFAULT;

        /* no highmem to worry about here */
        kaddr = pfn_to_kaddr(pfn);
        memcpy(ret, kaddr + offset, nb);
        return 0;
}
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
        if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
            ((unsigned long)ptr & 7))
                return -EFAULT;

        if (!__get_user_inatomic(*ret, ptr))
                return 0;

        return read_user_stack_slow(ptr, ret, 8);
}
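/*
 * Illustrative note (not part of the original file): __get_user_inatomic()
 * returns 0 on success, so the fast path above never touches the page
 * tables; only a faulting access falls through to read_user_stack_slow().
 * A caller sketch:
 *
 *      unsigned long word;
 *      if (read_user_stack_64(fp, &word) == 0)
 *              next_sp = word;         (word now holds the user stack value)
 */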
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
        if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
            ((unsigned long)ptr & 3))
                return -EFAULT;

        if (!__get_user_inatomic(*ret, ptr))
                return 0;

        return read_user_stack_slow(ptr, ret, 4);
}
static inline int valid_user_sp(unsigned long sp, int is_64)
{
        if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
                return 0;
        return 1;
}
/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
        char            dummy[__SIGNAL_FRAMESIZE];
        struct ucontext uc;
        unsigned long   unused[2];
        unsigned int    tramp[6];
        struct siginfo  *pinfo;
        void            *puc;
        struct siginfo  info;
        char            abigap[288];
};
static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
        if (nip == fp + offsetof(struct signal_frame_64, tramp))
                return 1;
        if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
            nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
                return 1;
        return 0;
}
/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
        struct signal_frame_64 __user *sf;
        unsigned long pinfo, puc;

        sf = (struct signal_frame_64 __user *) sp;
        if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
            read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
                return 0;
        return pinfo == (unsigned long) &sf->info &&
                puc == (unsigned long) &sf->uc;
}
static void perf_callchain_user_64(struct pt_regs *regs,
                                   struct perf_callchain_entry *entry)
{
        unsigned long sp, next_sp;
        unsigned long next_ip;
        unsigned long lr;
        long level = 0;
        struct signal_frame_64 __user *sigframe;
        unsigned long __user *fp, *uregs;

        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
        callchain_store(entry, PERF_CONTEXT_USER);
        callchain_store(entry, next_ip);

        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                fp = (unsigned long __user *) sp;
                if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
                        return;
                if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
                        return;

                /*
                 * Note: the next_sp - sp >= signal frame size check
                 * is true when next_sp < sp (the unsigned subtraction
                 * wraps around), which can happen when transitioning
                 * from an alternate signal stack to the normal stack.
                 */
                if (next_sp - sp >= sizeof(struct signal_frame_64) &&
                    (is_sigreturn_64_address(next_ip, sp) ||
                     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
                    sane_signal_64_frame(sp)) {
                        /*
                         * This looks like a signal frame
                         */
                        sigframe = (struct signal_frame_64 __user *) sp;
                        uregs = sigframe->uc.uc_mcontext.gp_regs;
                        if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
                            read_user_stack_64(&uregs[PT_LNK], &lr) ||
                            read_user_stack_64(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
                        callchain_store(entry, PERF_CONTEXT_USER);
                        callchain_store(entry, next_ip);
                        continue;
                }

                if (level == 0)
                        next_ip = lr;
                callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
}
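/*
 * Illustrative note (not part of the original file): when the walker
 * above recognizes a signal frame it does not follow the back chain.
 * Instead it reloads NIP, LR and R1 from the register state saved in
 * the frame, so the callchain resumes in the code that was interrupted
 * by the signal rather than stopping at the signal handler.
 */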
static inline int current_is_64bit(void)
{
        /*
         * We can't use test_thread_flag() here because we may be on an
         * interrupt stack, and the thread flags don't get copied over
         * from the thread_info on the main stack to the interrupt stack.
         */
        return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
}
#else  /* CONFIG_PPC64 */
/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables.  Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
        if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
            ((unsigned long)ptr & 3))
                return -EFAULT;

        return __get_user_inatomic(*ret, ptr);
}
static inline void perf_callchain_user_64(struct pt_regs *regs,
                                          struct perf_callchain_entry *entry)
{
}

static inline int current_is_64bit(void)
{
        return 0;
}
static inline int valid_user_sp(unsigned long sp, int is_64)
{
        if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
                return 0;
        return 1;
}
#define __SIGNAL_FRAMESIZE32    __SIGNAL_FRAMESIZE
#define sigcontext32            sigcontext
#define mcontext32              mcontext
#define ucontext32              ucontext
#define compat_siginfo_t        struct siginfo

#endif /* CONFIG_PPC64 */
/*
 * Layout for non-RT signal frames
 */
struct signal_frame_32 {
        char                    dummy[__SIGNAL_FRAMESIZE32];
        struct sigcontext32     sctx;
        struct mcontext32       mctx;
        int                     abigap[56];
};
/*
 * Layout for RT signal frames
 */
struct rt_signal_frame_32 {
        char                    dummy[__SIGNAL_FRAMESIZE32 + 16];
        compat_siginfo_t        info;
        struct ucontext32       uc;
        int                     abigap[56];
};
static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
        if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
                return 1;
        if (vdso32_sigtramp && current->mm->context.vdso_base &&
            nip == current->mm->context.vdso_base + vdso32_sigtramp)
                return 1;
        return 0;
}
static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
        if (nip == fp + offsetof(struct rt_signal_frame_32,
                                 uc.uc_mcontext.mc_pad))
                return 1;
        if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
            nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
                return 1;
        return 0;
}
static int sane_signal_32_frame(unsigned int sp)
{
        struct signal_frame_32 __user *sf;
        unsigned int regs;

        sf = (struct signal_frame_32 __user *) (unsigned long) sp;
        if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
                return 0;
        return regs == (unsigned long) &sf->mctx;
}
static int sane_rt_signal_32_frame(unsigned int sp)
{
        struct rt_signal_frame_32 __user *sf;
        unsigned int regs;

        sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
        if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
                return 0;
        return regs == (unsigned long) &sf->uc.uc_mcontext;
}
static unsigned int __user *signal_frame_32_regs(unsigned int sp,
                                unsigned int next_sp, unsigned int next_ip)
{
        struct mcontext32 __user *mctx = NULL;
        struct signal_frame_32 __user *sf;
        struct rt_signal_frame_32 __user *rt_sf;

        /*
         * Note: the next_sp - sp >= signal frame size check
         * is true when next_sp < sp (the unsigned subtraction wraps
         * around), for example, when transitioning from an alternate
         * signal stack to the normal stack.
         */
        if (next_sp - sp >= sizeof(struct signal_frame_32) &&
            is_sigreturn_32_address(next_ip, sp) &&
            sane_signal_32_frame(sp)) {
                sf = (struct signal_frame_32 __user *) (unsigned long) sp;
                mctx = &sf->mctx;
        }

        if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
            is_rt_sigreturn_32_address(next_ip, sp) &&
            sane_rt_signal_32_frame(sp)) {
                rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
                mctx = &rt_sf->uc.uc_mcontext;
        }

        if (!mctx)
                return NULL;
        return mctx->mc_gregs;
}
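/*
 * Illustrative note (not part of the original file): the two blocks above
 * distinguish the classic and RT signal frame layouts by which sigreturn
 * trampoline address matches; whichever frame sanity-checks out supplies
 * the saved register array (mc_gregs) used to restart the stack walk.
 */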
static void perf_callchain_user_32(struct pt_regs *regs,
                                   struct perf_callchain_entry *entry)
{
        unsigned int sp, next_sp;
        unsigned int next_ip;
        unsigned int lr;
        long level = 0;
        unsigned int __user *fp, *uregs;

        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
        callchain_store(entry, PERF_CONTEXT_USER);
        callchain_store(entry, next_ip);

        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                fp = (unsigned int __user *) (unsigned long) sp;
                if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
                        return;
                if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
                        return;

                uregs = signal_frame_32_regs(sp, next_sp, next_ip);
                if (!uregs && level <= 1)
                        uregs = signal_frame_32_regs(sp, next_sp, lr);
                if (uregs) {
                        /*
                         * This looks like a signal frame, so restart
                         * the stack trace with the values in it.
                         */
                        if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
                            read_user_stack_32(&uregs[PT_LNK], &lr) ||
                            read_user_stack_32(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
                        callchain_store(entry, PERF_CONTEXT_USER);
                        callchain_store(entry, next_ip);
                        continue;
                }

                if (level == 0)
                        next_ip = lr;
                callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
}
/*
 * Since we can't get PMU interrupts inside a PMU interrupt handler,
 * we don't need separate irq and nmi entries here.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
        struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);

        entry->nr = 0;

        if (!user_mode(regs)) {
                perf_callchain_kernel(regs, entry);
                if (current->mm)
                        regs = task_pt_regs(current);
                else
                        regs = NULL;
        }

        if (regs) {
                if (current_is_64bit())
                        perf_callchain_user_64(regs, entry);
                else
                        perf_callchain_user_32(regs, entry);
        }

        return entry;
}
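/*
 * Illustrative usage sketch (not part of the original file): a userspace
 * profiler obtains these callchains by requesting PERF_SAMPLE_CALLCHAIN
 * when it opens a counter, e.g. (error handling omitted):
 *
 *      struct perf_event_attr attr = {
 *              .type           = PERF_TYPE_HARDWARE,
 *              .config         = PERF_COUNT_HW_CPU_CYCLES,
 *              .sample_period  = 100000,
 *              .sample_type    = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN,
 *      };
 *      int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * Each sampled record then carries the entry filled in by perf_callchain()
 * above; "perf record -g" drives this same path.
 */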