/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

	if (cpu_has_guestid)
		return 0;
	else
		return cpu_asid(smp_processor_id(), gpa_mm);
}
#endif

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, kern_mm);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, user_mm);
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
							? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
			 tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/*
	 * Only dereference the TLB array on a hit; tlb[i] would be out of
	 * bounds here when no entry matched (i == KVM_MIPS_GUEST_TLB_SIZE).
	 */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
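
/*
 * Example (illustrative sketch, not part of this file): a caller looking up
 * the guest TLB entry for a faulting guest virtual address would build an
 * EntryHi value from the VPN2 of the address and the guest's current ASID;
 * "badvaddr" and "cop0" are hypothetical locals here:
 *
 *	index = kvm_mips_guest_tlb_lookup(vcpu,
 *			(badvaddr & VPN2_MASK) |
 *			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
 *	if (index < 0)
 *		... no matching guest TLB entry, deliver a guest TLB miss ...
 */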

static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
	int idx;

	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	return idx;
}

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
			  bool user, bool kernel)
{
	int idx_user, idx_kernel;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (user)
		idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						  kvm_mips_get_user_asid(vcpu));
	if (kernel)
		idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						kvm_mips_get_kernel_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (user && idx_user >= 0)
		kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_user_asid(vcpu), idx_user);
	if (kernel && idx_kernel >= 0)
		kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_kernel_asid(vcpu), idx_kernel);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
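
/*
 * Example (illustrative sketch): when a guest TLB entry is replaced, the
 * stale host TLB mappings for both the guest kernel and guest user ASIDs
 * can be dropped in one call; "gva" is a hypothetical local:
 *
 *	kvm_mips_host_tlb_inv(vcpu, gva, true, true);
 */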

#ifdef CONFIG_KVM_MIPS_VZ

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
	if (cpu_has_guestid) {
		clear_c0_guestctl1(MIPS_GCTL1_RID);
		mtc0_tlbw_hazard();
	}
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
	unsigned int guestctl1;

	if (cpu_has_guestid) {
		back_to_back_c0_hazard();
		guestctl1 = read_c0_guestctl1();
		guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
			((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
						 << MIPS_GCTL1_RID_SHIFT;
		write_c0_guestctl1(guestctl1);
		mtc0_tlbw_hazard();
	}
}
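
/*
 * A minimal sketch of the calling convention described above (it mirrors the
 * pattern used by kvm_vz_host_tlb_inv() below): the root GID window must be
 * bracketed by htw_stop()/htw_start() with interrupts disabled.
 *
 *	local_irq_save(flags);
 *	htw_stop();
 *	set_root_gid_to_guest_gid();
 *	... probe/read/write root TLB entries for the guest ...
 *	clear_root_gid();
 *	htw_start();
 *	local_irq_restore(flags);
 */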

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);
	htw_stop();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	old_entryhi = read_c0_entryhi();

	idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
				     kvm_mips_get_root_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	clear_root_gid();
	mtc0_tlbw_hazard();

	htw_start();
	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	/* Index 0 is a valid TLB entry, so log any non-negative hit */
	if (idx >= 0)
		kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_root_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);

/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu:	KVM VCPU pointer.
 * @gva:	Guest virtual address in a TLB mapped guest segment.
 * @gpa:	Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:	0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa)
{
	unsigned long o_entryhi, o_entrylo[2], o_pagemask;
	unsigned int o_index;
	unsigned long entrylo[2], pagemask, pagemaskbit, pa;
	unsigned long flags;
	int index;

	/* Probe the guest TLB for a mapping */
	local_irq_save(flags);
	/* Set root GuestID for root probe of guest TLB entry */
	htw_stop();
	set_root_gid_to_guest_gid();

	o_entryhi = read_gc0_entryhi();
	o_index = read_gc0_index();

	write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();

	index = read_gc0_index();
	if (index < 0) {
		/* No match, fail */
		write_gc0_entryhi(o_entryhi);
		write_gc0_index(o_index);

		clear_root_gid();
		htw_start();
		local_irq_restore(flags);
		return -EFAULT;
	}

	/* Match! read the TLB entry */
	o_entrylo[0] = read_gc0_entrylo0();
	o_entrylo[1] = read_gc0_entrylo1();
	o_pagemask = read_gc0_pagemask();

	mtc0_tlbr_hazard();
	guest_tlb_read();
	tlb_read_hazard();

	entrylo[0] = read_gc0_entrylo0();
	entrylo[1] = read_gc0_entrylo1();
	pagemask = ~read_gc0_pagemask() & ~0x1fffl;

	write_gc0_entryhi(o_entryhi);
	write_gc0_index(o_index);
	write_gc0_entrylo0(o_entrylo[0]);
	write_gc0_entrylo1(o_entrylo[1]);
	write_gc0_pagemask(o_pagemask);

	clear_root_gid();
	htw_start();
	local_irq_restore(flags);

	/*
	 * Select one of the EntryLo values and interpret the GPA.
	 * pagemask & (pagemask - 1) clears the lowest set bit, so the XOR
	 * isolates it; shifting right by one yields the GVA bit that selects
	 * between the even (EntryLo0) and odd (EntryLo1) page of the pair.
	 */
	pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	pa = entrylo[!!(gva & pagemaskbit)];

	/*
	 * TLB entry may have become invalid since TLB probe if physical FTLB
	 * entries are shared between threads (e.g. I6400).
	 */
	if (!(pa & ENTRYLO_V))
		return -EFAULT;

	/*
	 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
	 * split with XI/RI in the middle.
	 */
	pa = (pa << 6) & ~0xfffl;
	pa |= gva & ~(pagemask | pagemaskbit);

	*gpa = pa;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
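
/*
 * Example (illustrative sketch): translating a faulting guest virtual address
 * to a guest physical address before handling it in the root context;
 * "badvaddr" is a hypothetical local:
 *
 *	unsigned long gpa;
 *
 *	if (kvm_vz_guest_tlb_lookup(vcpu, badvaddr, &gpa))
 *		... no valid guest mapping, reflect a TLB exception to the
 *		    guest instead ...
 */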

/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
	unsigned long flags;
	unsigned long old_entryhi, old_pagemask, old_guestctl1;
	int entry;

	if (WARN_ON(!cpu_has_guestid))
		return;

	local_irq_save(flags);
	htw_stop();

	/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	old_guestctl1 = read_c0_guestctl1();

	/*
	 * Invalidate guest entries in root TLB while leaving root entries
	 * intact when possible.
	 */
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlb_read_hazard();

		/* Don't invalidate non-guest (RVA) mappings in the root TLB */
		if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
			continue;

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_guestctl1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	write_c0_guestctl1(old_guestctl1);
	tlbw_use_hazard();

	htw_start();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
	unsigned long flags;
	unsigned long old_index;
	unsigned long old_entryhi;
	unsigned long old_entrylo[2];
	unsigned long old_pagemask;
	int entry;
	u64 cvmmemctl2 = 0;

	local_irq_save(flags);

	/* Preserve all clobbered guest registers */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo[0] = read_gc0_entrylo0();
	old_entrylo[1] = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Inhibit machine check due to multiple matching TLB entries */
		cvmmemctl2 = read_c0_cvmmemctl2();
		cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
		break;
	}

	/* Invalidate guest entries in guest TLB */
	write_gc0_entrylo0(0);
	write_gc0_entrylo1(0);
	write_gc0_pagemask(0);
	for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
		/* Make sure all entries differ. */
		write_gc0_index(entry);
		write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	if (cvmmemctl2) {
		cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
	}

	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo[0]);
	write_gc0_entrylo1(old_entrylo[1]);
	write_gc0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);

/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:	Buffer to write TLB entries into.
 * @index:	Start index.
 * @count:	Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	unsigned int guestctl1 = 0;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();
	if (cpu_has_guestid)
		guestctl1 = read_c0_guestctl1();

	/* Read each entry from guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);

		mtc0_tlbr_hazard();
		guest_tlb_read();
		tlb_read_hazard();

		if (cpu_has_guestid &&
		    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
			/* Entry invalid or belongs to another guest */
			buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
			buf->tlb_lo[0] = 0;
			buf->tlb_lo[1] = 0;
			buf->tlb_mask = 0;
		} else {
			/* Entry belongs to the right guest */
			buf->tlb_hi = read_gc0_entryhi();
			buf->tlb_lo[0] = read_gc0_entrylo0();
			buf->tlb_lo[1] = read_gc0_entrylo1();
			buf->tlb_mask = read_gc0_pagemask();
		}
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);

/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:	Buffer to read TLB entries from.
 * @index:	Start index.
 * @count:	Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root write of guest TLB entries */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Write each entry to guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);
		write_gc0_entryhi(buf->tlb_hi);
		write_gc0_entrylo0(buf->tlb_lo[0]);
		write_gc0_entrylo1(buf->tlb_lo[1]);
		write_gc0_pagemask(buf->tlb_mask);

		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
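
/*
 * Example (illustrative sketch): the save/load pair lets the guest TLB
 * contents be preserved across an operation that clobbers them, with
 * interrupts disabled as both helpers require; "tlbs" is a hypothetical
 * buffer sized to the guest TLB:
 *
 *	kvm_vz_save_guesttlb(tlbs, 0, current_cpu_data.guest.tlbsize);
 *	... use or repurpose the guest TLB ...
 *	kvm_vz_load_guesttlb(tlbs, 0, current_cpu_data.guest.tlbsize);
 */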

#endif

/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:	The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a different
 * process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
	current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);

/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu:	The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
	current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
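
/*
 * Example (illustrative sketch): bracketing entry to guest context, mirroring
 * how these two helpers pair up; assumes the caller runs with preemption
 * disabled on the CPU in question:
 *
 *	cpu = smp_processor_id();
 *	kvm_mips_suspend_mm(cpu);
 *	... run in the KVM guest virtual address space ...
 *	kvm_mips_resume_mm(cpu);
 */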