/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
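
/*
 * The two blocks above reflect the ABI naming split: o32 names $8-$11
 * t0-t3, while n32/n64 use $8-$11 as argument registers a4-a7 and name
 * $12-$15 t0-t3 instead.
 */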

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31

/* Some CP0 registers */
#define C0_PWBASE	5, 5
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_BADINSTR	8, 1
#define C0_BADINSTRP	8, 2
#define C0_ENTRYHI	10, 0
#define C0_GUESTCTL1	10, 4
#define C0_STATUS	12, 0
#define C0_GUESTCTL0	12, 6
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

#define CALLFRAME_SIZ   32
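/*
 * CALLFRAME_SIZ is the stack space reserved when the generated code calls
 * into C (see the jalr to kvm_mips_handle_exit() below); presumably sized
 * to cover the ABI argument save area plus stack alignment.
 */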

#ifdef CONFIG_64BIT
#define ST0_KX_IF_64	ST0_KX
#else
#define ST0_KX_IF_64	0
#endif
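/*
 * Note: on 64-bit kernels Status.KX must stay set while entering the guest
 * so that 64-bit kernel-segment addressing (XKSEG/XKPHYS) still works once
 * an exception returns control to root context.
 */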

static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };
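/*
 * These { reg, sel } pairs are only fallback defaults; kvm_mips_entry_setup()
 * replaces them with KScratch registers when the core provides any.
 */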

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/*
 * The version of this function in tlbex.c uses current_cpu_type(), but for
 * KVM we assume symmetry between CPUs, so boot_cpu_type() suffices.
 */
static int c0_kscratch(void)
{
	switch (boot_cpu_type()) {
	case CPU_XLP:
	case CPU_XLR:
		return 22;
	default:
		return 31;
	}
}

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;

	if (pgd_reg != -1)
		kscratch_mask &= ~BIT(pgd_reg);

	/* Pick a scratch register for storing VCPU */
	if (kscratch_mask) {
		scratch_vcpu[0] = c0_kscratch();
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = c0_kscratch();
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}
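
/*
 * Worked example (hypothetical core): with kscratch_mask = 0xfc (KScratch
 * selects 2-7 usable) and pgd_reg = 2 already claimed by the host, the code
 * above picks scratch_vcpu = { 31, 3 } and scratch_tmp = { 31, 4 }, i.e.
 * CP0 register 31, selects 3 and 4.
 */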

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (cpu_has_ebase_wg) {
		/* Set WG so that all the bits get written */
		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
		UASM_i_MTC0(p, reg, C0_EBASE);
	} else {
		uasm_i_mtc0(p, reg, C0_EBASE);
	}
}

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: run
	 * A1: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}
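	/*
	 * The loop above stores the callee-saved registers s0-s7 ($16-$23)
	 * plus gp, sp, s8/fp and ra ($28-$31), skipping t8/t9/k0/k1
	 * ($24-$27), which need not survive a function call.
	 */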

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save scratch registers, will be used to store pointer to vcpu etc */
	kvm_mips_build_save_scratch(&p, V1, K1);

	/* VCPU scratch register has pointer to vcpu */
	UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	build_set_exc_base(&p, K0);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}
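
/*
 * Note: the generated code is written to normal cacheable memory, so the
 * caller is expected to flush the icache over the whole buffer before any
 * of it is executed.
 */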

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label __maybe_unused *l = labels;
	struct uasm_reloc __maybe_unused *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

#ifdef CONFIG_KVM_MIPS_VZ
	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
	UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);

	/*
	 * Set up KVM GPA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 *
	 * We keep S0 pointing at struct kvm so we can load the ASID below.
	 */
	UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
			  (int)offsetof(struct kvm_vcpu, arch), K1);
	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Set GM bit to setup eret to VZ guest context */
	uasm_i_addiu(&p, V1, ZERO, 1);
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	if (cpu_has_guestid) {
		/*
		 * Set root mode GuestID, so that root TLB refill handler can
		 * use the correct GuestID in the root TLB.
		 */

		/* Get current GuestID */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = GuestCtl1.ID */
		uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
			   MIPS_GCTL1_ID_WIDTH);
		uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);

		/* GuestID handles dealiasing so we don't need to touch ASID */
		goto skip_asid_restore;
	}

	/* Root ASID Dealias (RAD) */

	/* Save host ASID */
	UASM_i_MFC0(&p, K0, C0_ENTRYHI);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
		  K1);

	/* Set the root ASID for the Guest */
	UASM_i_ADDIU(&p, T1, S0,
		     offsetof(struct kvm, arch.gpa_mm.context.asid));
#else
	/* Set the ASID for the Guest Kernel or User */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
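	/*
	 * The guest is in kernel mode unless Status.KSU is user with ERL and
	 * EXL both clear, so the andi/xori pair below yields zero exactly in
	 * pure user mode. The bnez then skips the user-mode ADDIU; its delay
	 * slot holds the kernel-mode ADDIU, which the user path simply
	 * overwrites.
	 */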
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
					  guest_kernel_mm.context.asid));
	/* else user */
	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
					  guest_user_mm.context.asid));
	uasm_l_kernel_asid(&l, p);
#endif

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* index the ASID array */
	uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
	UASM_i_ADDU(&p, T3, T1, T2);
	UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/*
	 * reuse ASID array offset
	 * cpuinfo_mips is a multiple of sizeof(long)
	 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
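	/*
	 * Illustrative C equivalent of the ASID load assembled above
	 * (a sketch only, not compiled code):
	 *
	 *	cpu  = current_thread_info()->cpu;
	 *	asid = mm->context.asid[cpu] & cpu_asid_mask(&cpu_data[cpu]);
	 */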
7faa6eec | 405 | |
1934a3ad | 406 | #ifndef CONFIG_KVM_MIPS_VZ |
7faa6eec JH |
407 | /* |
408 | * Set up KVM T&E GVA pgd. | |
409 | * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD(): | |
410 | * - call tlbmiss_handler_setup_pgd(mm->pgd) | |
411 | * - but skips write into CP0_PWBase for now | |
412 | */ | |
413 | UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) - | |
414 | (int)offsetof(struct mm_struct, context.asid), T1); | |
415 | ||
416 | UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd); | |
417 | uasm_i_jalr(&p, RA, T9); | |
418 | uasm_i_mtc0(&p, K0, C0_ENTRYHI); | |
1934a3ad JH |
419 | #else |
420 | /* Set up KVM VZ root ASID (!guestid) */ | |
421 | uasm_i_mtc0(&p, K0, C0_ENTRYHI); | |
422 | skip_asid_restore: | |
423 | #endif | |
90e9311a JH |
424 | uasm_i_ehb(&p); |
425 | ||
426 | /* Disable RDHWR access */ | |
427 | uasm_i_mtc0(&p, ZERO, C0_HWRENA); | |
428 | ||
429 | /* load the guest context from VCPU and return */ | |
430 | for (i = 1; i < 32; ++i) { | |
431 | /* Guest k0/k1 loaded later */ | |
432 | if (i == K0 || i == K1) | |
433 | continue; | |
434 | UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1); | |
435 | } | |
436 | ||
70e92c7e | 437 | #ifndef CONFIG_CPU_MIPSR6 |
90e9311a JH |
438 | /* Restore hi/lo */ |
439 | UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1); | |
440 | uasm_i_mthi(&p, K0); | |
441 | ||
442 | UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1); | |
443 | uasm_i_mtlo(&p, K0); | |
70e92c7e | 444 | #endif |
90e9311a JH |
445 | |
446 | /* Restore the guest's k0/k1 registers */ | |
447 | UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1); | |
448 | UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1); | |
449 | ||
450 | /* Jump to guest */ | |
451 | uasm_i_eret(&p); | |
452 | ||
453 | uasm_resolve_relocs(relocs, labels); | |
454 | ||
455 | return p; | |
456 | } | |

/**
 * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble TLB refill exception fast path handler for guest execution.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);

	/*
	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
	 * assume symmetry and just disable preemption to silence the warning.
	 */
	preempt_disable();

	/*
	 * Now for the actual refill bit. A lot of this can be common with the
	 * Linux TLB refill handler, however we don't need to handle so many
	 * cases. We only need to handle user mode refills, and user mode runs
	 * with 32-bit addressing.
	 *
	 * Therefore the branch to label_vmalloc generated by build_get_pmde64()
	 * that isn't resolved should never actually get taken and is harmless
	 * to leave in place for now.
	 */

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	/* we don't support huge pages yet */

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);

	preempt_enable();

	/* Get the VCPU pointer from the VCPU scratch register again */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Jump to guest */
	uasm_i_eret(&p);

	return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	if (cpu_has_badinstr) {
		uasm_i_mfc0(&p, K0, C0_BADINSTR);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstr), K1);
	}

	if (cpu_has_badinstrp) {
		uasm_i_mfc0(&p, K0, C0_BADINSTRP);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstrp), K1);
	}

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

#ifdef CONFIG_KVM_MIPS_VZ
	/* Restore host ASID */
	if (!cpu_has_guestid) {
		UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
			  K1);
		UASM_i_MTC0(&p, K0, C0_ENTRYHI);
	}

	/*
	 * Set up normal Linux process pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 */
	UASM_i_LW(&p, A0,
		  offsetof(struct kvm_vcpu_arch, host_pgd), K1);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Clear GM bit so we don't enter guest mode when EXL is cleared */
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	/* Save GuestCtl0 so we can access GExcCode after CPU migration */
	uasm_i_sw(&p, K0,
		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);

	if (cpu_has_guestid) {
		/*
		 * Clear root mode GuestID, so that root TLB operations use the
		 * root GuestID in the root TLB.
		 */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
		uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
	}
#endif

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
#ifdef CONFIG_64BIT
	uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
#endif
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	uasm_i_move(&p, A0, S0);
	uasm_i_move(&p, A1, S1);
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler; make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	uasm_i_move(&p, K1, S1);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s1) back into the scratch register */
	UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	build_set_exc_base(&p, T0);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}
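	/*
	 * The loop above reloads s0-s7, gp, sp and s8/fp; ra ($31) is
	 * reloaded separately just before the jump below.
	 */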

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	uasm_i_nop(&p);

	return p;
}