/* arch/powerpc/platforms/pseries/hvCall.S */
/*
 * This file contains the generic code to perform a call to the
 * pSeries LPAR hypervisor.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

/*
 * Offset of the caller's parameter-save slot for argument register rN
 * (N >= 3) in the ELFv1 stack frame: 48-byte fixed header, then one
 * 8-byte doubleword per GPR argument starting at r3.
 */
#define STK_PARM(i)	(48 + ((i)-3)*8)
#ifdef CONFIG_TRACEPOINTS

	/*
	 * hcall_tracepoint_refcount lives in the TOC so asm code can
	 * reach it with a single TOC-relative load.  Non-zero while the
	 * hcall entry/exit tracepoints are enabled.
	 */
	.section	".toc","aw"

	.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
	.llong	0

	.section	".text"
/*
 * precall must preserve all registers.  Use unused STK_PARM()
 * areas to save snapshots and opcode.  We branch around this
 * in early init (eg when populating the MMU hashtable) by using an
 * unconditional cpu feature.
 *
 * FIRST_REG is the first register holding an hcall argument proper
 * (r4 for plpar_hcall_norets, r5 for the buffered variants); its
 * save slot address is passed to __trace_hcall_entry in r4.
 */
#define HCALL_INST_PRECALL(FIRST_REG)				\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
	cmpdi	r12,0;						\
	beq+	1f;						\
	mflr	r0;						\
	std	r3,STK_PARM(r3)(r1);				\
	std	r4,STK_PARM(r4)(r1);				\
	std	r5,STK_PARM(r5)(r1);				\
	std	r6,STK_PARM(r6)(r1);				\
	std	r7,STK_PARM(r7)(r1);				\
	std	r8,STK_PARM(r8)(r1);				\
	std	r9,STK_PARM(r9)(r1);				\
	std	r10,STK_PARM(r10)(r1);				\
	std	r0,16(r1);					\
	addi	r4,r1,STK_PARM(FIRST_REG);			\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	.__trace_hcall_entry;				\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r0,16(r1);					\
	ld	r3,STK_PARM(r3)(r1);				\
	ld	r4,STK_PARM(r4)(r1);				\
	ld	r5,STK_PARM(r5)(r1);				\
	ld	r6,STK_PARM(r6)(r1);				\
	ld	r7,STK_PARM(r7)(r1);				\
	ld	r8,STK_PARM(r8)(r1);				\
	ld	r9,STK_PARM(r9)(r1);				\
	ld	r10,STK_PARM(r10)(r1);				\
	mtlr	r0;						\
1:
/*
 * postcall is performed immediately before function return which
 * allows liberal use of volatile registers.  We branch around this
 * in early init (eg when populating the MMU hashtable) by using an
 * unconditional cpu feature.
 *
 * Expects r5 preloaded with the return-buffer pointer (or 0) by one
 * of the wrapper macros below; calls __trace_hcall_exit(opcode,
 * retval, retbuf) where the opcode is reloaded from its precall
 * save slot.
 */
#define __HCALL_INST_POSTCALL				\
BEGIN_FTR_SECTION;					\
	b	1f;					\
END_FTR_SECTION(0, 1);					\
	ld	r12,hcall_tracepoint_refcount@toc(r2);	\
	cmpdi	r12,0;					\
	beq+	1f;					\
	mflr	r0;					\
	ld	r6,STK_PARM(r3)(r1);			\
	std	r3,STK_PARM(r3)(r1);			\
	mr	r4,r3;					\
	mr	r3,r6;					\
	std	r0,16(r1);				\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);		\
	bl	.__trace_hcall_exit;			\
	addi	r1,r1,STACK_FRAME_OVERHEAD;		\
	ld	r0,16(r1);				\
	ld	r3,STK_PARM(r3)(r1);			\
	mtlr	r0;					\
1:

/* No return buffer: trace with a NULL retbuf pointer. */
#define HCALL_INST_POSTCALL_NORETS			\
	li	r5,0;					\
	__HCALL_INST_POSTCALL

/* BUFREG holds the return-value buffer address to hand to the tracer. */
#define HCALL_INST_POSTCALL(BUFREG)			\
	mr	r5,BUFREG;				\
	__HCALL_INST_POSTCALL
#else
/* Tracepoints disabled: the instrumentation hooks compile away to nothing. */
#define HCALL_INST_PRECALL(FIRST_ARG)
#define HCALL_INST_POSTCALL_NORETS
#define HCALL_INST_POSTCALL(BUFREG)
#endif

	.text
/*
 * long plpar_hcall_norets(unsigned long opcode, ...);
 *
 * Hypervisor call with the opcode in r3 and up to seven arguments in
 * r4-r10.  No return values are copied back; only the status lands
 * in r3.  CR is saved/restored around HVSC since the hypervisor may
 * clobber it.
 */
_GLOBAL(plpar_hcall_norets)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)		/* save CR in caller's frame */

	HCALL_INST_PRECALL(r4)

	HVSC				/* invoke the hypervisor */

	HCALL_INST_POSTCALL_NORETS

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr				/* return r3 = status */
/*
 * long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * Four-return-value hypervisor call.  r4 (the return buffer pointer)
 * is stashed in its parameter-save slot, the real hcall arguments are
 * shuffled down into r4-r9, and after HVSC the four return values in
 * r4-r7 are stored to the buffer.
 */
_GLOBAL(plpar_hcall)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)		/* save CR in caller's frame */

	HCALL_INST_PRECALL(r5)

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	mr	r4,r5			/* shift args down over retbuf */
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARM(r4)(r1)	/* recover retbuf pointer */
	std	r4, 0(r12)
	std	r5, 8(r12)
	std	r6,16(r12)
	std	r7,24(r12)

	HCALL_INST_POSTCALL(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */
/*
 * plpar_hcall_raw can be called in real mode.  kexec/kdump need some
 * hypervisor calls to be executed in real mode.  So plpar_hcall_raw
 * does not access the per cpu hypervisor call statistics variables,
 * since these variables may not be present in the RMO region.
 *
 * Otherwise identical to plpar_hcall: four return values stored to
 * the buffer passed in r4.
 */
_GLOBAL(plpar_hcall_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)		/* save CR in caller's frame */

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	mr	r4,r5			/* shift args down over retbuf */
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARM(r4)(r1)	/* recover retbuf pointer */
	std	r4, 0(r12)
	std	r5, 8(r12)
	std	r6,16(r12)
	std	r7,24(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */
/*
 * long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * Nine-argument, nine-return-value hypervisor call.  Args 7-9 arrive
 * in the caller's parameter-save area (slots for r11-r13) and are
 * loaded into r10-r12 before HVSC.  r12 must double as the retbuf
 * pointer afterwards, so the ninth return value is parked in r0
 * while the buffer is filled.
 */
_GLOBAL(plpar_hcall9)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)		/* save CR in caller's frame */

	HCALL_INST_PRECALL(r5)

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	mr	r4,r5			/* shift args down over retbuf */
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARM(r11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARM(r12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARM(r13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12			/* free r12 to address retbuf */
	ld	r12,STK_PARM(r4)(r1)
	std	r4, 0(r12)
	std	r5, 8(r12)
	std	r6,16(r12)
	std	r7,24(r12)
	std	r8,32(r12)
	std	r9,40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0,64(r12)		/* ninth return value */

	HCALL_INST_POSTCALL(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */