/*
 * arch/sh/lib/mcount.S
 *
 * Copyright (C) 2008, 2009 Paul Mundt
 * Copyright (C) 2008, 2009 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

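/*
 * MCOUNT_ENTER() saves the argument registers r4-r7 and pr (20 bytes)
 * so the C trace handlers can be called safely, then sets up the two
 * trace arguments: r4 is loaded from the word that was on top of the
 * stack at entry (the instrumented function's saved return address,
 * given the usual -pg call sequence) and r5 from pr (the mcount call
 * site inside the instrumented function).
 */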
#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15),r4;	\
	sts	pr, r5

#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	mov.l	@r15+, r4

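/*
 * Note that the final r4 restore in MCOUNT_LEAVE() sits in the rts
 * delay slot and so still executes before the return is taken.
 */
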
#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow. If there is less than 1KB free
 * then it has overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after __bss_stop) and anywhere in init_thread_union (init_stack).
 */
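/*
 * THREAD_SIZE and STACK_WARN are too wide for SH's 8-bit mov
 * immediates, so they are rematerialized below from shifted-down
 * constants: shll8 + shll2 recovers THREAD_SIZE from
 * (THREAD_SIZE >> 10), and shll8 recovers STACK_WARN from
 * (STACK_WARN >> 8).
 */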
#define STACK_CHECK()					\
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > __bss_stop then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
							\
	/* If sp > init_stack && sp < __bss_stop, not OK. */	\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */

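/*
 * STACK_CHECK() deliberately confines itself to r0-r3, which the SH C
 * ABI treats as caller-saved scratch; the instrumented function's
 * arguments are still live in r4-r7 at this point and are only
 * preserved later by MCOUNT_ENTER().
 */
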
	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
	STACK_CHECK()

#ifndef CONFIG_FUNCTION_TRACER
	rts
	nop
#else
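/*
 * Without CONFIG_DYNAMIC_FTRACE the call sites cannot be patched out,
 * so function_trace_stop is polled on every call: a non-zero value
 * returns immediately via ftrace_stub.
 */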
#ifndef CONFIG_DYNAMIC_FTRACE
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub
#endif

	MCOUNT_ENTER()

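/*
 * With CONFIG_DYNAMIC_FTRACE, mcount itself only ever calls
 * ftrace_stub (live call sites are redirected to ftrace_caller at
 * runtime instead). In the static case the trace function is fetched
 * from ftrace_trace_function, with a fast path that skips the call
 * while it still points at ftrace_stub.
 */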
#ifdef CONFIG_DYNAMIC_FTRACE
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
	mov.l	.Lftrace_trace_function, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace
	mov.l	@r6, r6
#endif

	jsr	@r6
	nop

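/*
 * The graph tracer counts as active once either ftrace_graph_return
 * or ftrace_graph_entry no longer matches its stub default; in that
 * case we divert through ftrace_graph_caller rather than falling
 * straight through to skip_trace.
 */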
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	mov.l	.Lftrace_graph_return, r6
	mov.l	.Lftrace_stub, r7
	cmp/eq	r6, r7
	bt	1f

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	nop

1:
	mov.l	.Lftrace_graph_entry, r6
	mov.l	.Lftrace_graph_entry_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	nop

	.align 2
.Lftrace_graph_return:
	.long	ftrace_graph_return
.Lftrace_graph_entry:
	.long	ftrace_graph_entry
.Lftrace_graph_entry_stub:
	.long	ftrace_graph_entry_stub
.Lftrace_graph_caller:
	.long	ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl	skip_trace
skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long	ftrace_trace_function

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
	.globl	ftrace_graph_call
ftrace_graph_call:
	mov.l	.Lskip_trace, r0
	jmp	@r0
	nop

	.align 2
.Lskip_trace:
	.long	skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
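
/*
 * ftrace_graph_call above and ftrace_call below are the runtime patch
 * sites used by the dynamic ftrace code in arch/sh/kernel/ftrace.c
 * (hence the fixed-offset warnings in this file): the graph jump is
 * enabled or disabled, and the load at ftrace_call is redirected from
 * .Lftrace_stub to the active trace function.
 */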

	.globl	ftrace_caller
ftrace_caller:
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub

	MCOUNT_ENTER()

	.globl	ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	bra	ftrace_graph_call
	nop
#else
	MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */

	.align 2
.Lfunction_trace_stop:
	.long	function_trace_stop

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_caller
ftrace_graph_caller:
	mov.l	2f, r0
	mov.l	@r0, r0
	tst	r0, r0
	bt	1f

	mov.l	3f, r1
	jmp	@r1
	nop
1:
	/*
	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
	 * the stack address containing our return address is
	 * r15 + 20.
	 */
	mov	#20, r0
	add	r15, r0
	mov	r0, r4

	mov.l	.Lprepare_ftrace_return, r0
	jsr	@r0
	nop

	MCOUNT_LEAVE()

	.align 2
2:	.long	function_trace_stop
3:	.long	skip_trace
.Lprepare_ftrace_return:
	.long	prepare_ftrace_return

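/*
 * For the graph tracer, prepare_ftrace_return() replaces the saved
 * return address on the stack (passed by pointer in r4 above) with
 * the address of return_to_handler, so a traced function "returns"
 * here first.
 */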
	.globl	return_to_handler
return_to_handler:
	/*
	 * Save the return values.
	 */
	mov.l	r0, @-r15
	mov.l	r1, @-r15

	mov	#0, r4

	mov.l	.Lftrace_return_to_handler, r0
	jsr	@r0
	nop

	/*
	 * The return value from ftrace_return_to_handler has the real
	 * address that we should return to.
	 */
	lds	r0, pr
	mov.l	@r15+, r1
	rts
	mov.l	@r15+, r0

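/*
 * Note the mov #0, r4 above: the frame pointer argument to
 * ftrace_return_to_handler() is simply zero here, as SH does not
 * maintain one for this path.
 */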

	.align 2
.Lftrace_return_to_handler:
	.long	ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_DEBUG
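/*
 * Fatal path for STACK_CHECK() failures: dump the stack, then panic.
 * The mov.l in the jsr delay slot loads the "Stack error" string as
 * panic()'s argument before the branch is taken.
 */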
	.globl	stack_panic
stack_panic:
	mov.l	.Ldump_stack, r0
	jsr	@r0
	nop

	mov.l	.Lpanic, r0
	jsr	@r0
	mov.l	.Lpanic_s, r4

	rts
	nop

	.align 2
.L_init_thread_union:
	.long	init_thread_union
.L_ebss:
	.long	__bss_stop
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section .rodata
	.align 2
.Lpanic_str:
	.string "Stack error"
#endif /* CONFIG_STACK_DEBUG */