/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);
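
/*
 * Load/put hooks for a vcpu: record the vcpu and its virtual core in the
 * PACA so the real-mode entry code can find them, and account stolen time
 * (timebase ticks spent preempted) against the vcore while its runner
 * task is switched out.
 */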
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	local_paca->kvm_hstate.kvm_vcpu = vcpu;
	local_paca->kvm_hstate.kvm_vcore = vc;
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->stolen_tb += mftb() - vc->preempt_tb;
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->preempt_tb = mftb();
}
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}
void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err(" ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}
struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->shared_proc = 1;
	vpa->yield_count = 1;
}
/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		u16 hword;
		u32 word;
	} length;
};
static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}
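
/*
 * Handle the H_REGISTER_VPA hcall for a target vcpu.  Bits of the flags
 * argument (H_VPA_FUNC_SHIFT/MASK) select the subfunction: registering or
 * deregistering the VPA, the dispatch trace log (DTL) or the SLB shadow
 * buffer.  A registration is only recorded here (next_gpa/update_pending);
 * the buffer is actually pinned later by kvmppc_update_vpas().
 */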
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = ((struct reg_vpa *)va)->length.hword;
		else
			len = ((struct reg_vpa *)va)->length.word;
		kvmppc_unpin_guest_page(kvm, va);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}
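
/*
 * Apply a pending VPA/DTL/SLB-shadow update: pin the newly registered
 * guest area (if any) and unpin the previously pinned one.  Called with
 * the vcpu's vpa_update_lock held.
 */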
static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
{
	void *va;
	unsigned long nb;

	vpap->update_pending = 0;
	va = NULL;
	if (vpap->next_gpa) {
		va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
		if (nb < vpap->len) {
			/*
			 * If it's now too short, it must be that userspace
			 * has changed the mappings underlying guest memory,
			 * so unregister the region.
			 */
			kvmppc_unpin_guest_page(kvm, va);
			va = NULL;
		}
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
	vpap->pinned_addr = va;
	if (va)
		vpap->pinned_end = va + vpap->len;
}
static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
		init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}
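
/*
 * Write an entry into the guest's dispatch trace log, if it has registered
 * one, recording where the vcpu is being dispatched and how long it has
 * been kept waiting (the stolen time accumulated since the last dispatch).
 */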
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long old_stolen;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	old_stolen = vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = vc->stolen_tb;
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
	dt->timebase = mftb();
	dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen;
	dt->srr0 = kvmppc_get_pc(vcpu);
	dt->srr1 = vcpu->arch.shregs.msr;
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = ++vcpu->arch.dtl_index;
}
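
/*
 * Handle the hcalls that are processed directly in the kernel; any
 * request we don't handle here is passed up to userspace by returning
 * RESUME_HOST.
 */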
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;

	switch (req) {
	case H_ENTER:
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}
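
/*
 * Work out what to do after a guest exit: either resume the guest
 * (RESUME_GUEST) or drop back to the host/userspace (RESUME_HOST),
 * queueing interrupts for the guest or building a KVM_EXIT_PAPR_HCALL
 * exit as appropriate for the trap that occurred.
 */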
static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		r = kvmppc_book3s_hv_page_fault(run, vcpu,
				kvmppc_get_pc(vcpu), 0);
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = put_user(0, (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
	{
		u64 hior;
		/* Only allow this to be set to zero */
		r = get_user(hior, (u64 __user *)reg->addr);
		if (!r && (hior != 0))
			r = -EINVAL;
		break;
	}
	default:
		break;
	}

	return r;
}
int kvmppc_core_check_processor_compat(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;
	return -EIO;
}
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.last_cpu = -1;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	spin_lock_init(&vcpu->arch.vpa_update_lock);

	kvmppc_mmu_book3s_hv_init(vcpu);

	/*
	 * We consider the vcpu stopped until we see the first run ioctl for it.
	 */
	vcpu->arch.state = KVMPPC_VCPU_STOPPED;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
			vcore->preempt_tb = mftb();
		}
		kvm->arch.vcores[core] = vcore;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;
	vcpu->arch.stolen_logged = vcore->stolen_tb;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.dtl.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
	if (vcpu->arch.slb_shadow.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
	if (vcpu->arch.vpa.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		   / tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}
extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	--vc->n_runnable;
	++vc->n_busy;
	/* decrement the physical thread id of each following vcpu */
	v = vcpu;
	list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
		--v->arch.ptid;
	list_del(&vcpu->arch.run_list);
}
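
/*
 * Claim a secondary hardware thread of this core for KVM's use: ask it
 * (via hwthread_req in its PACA) to stay in nap rather than entering the
 * kernel, and wait for it to acknowledge if it is currently in the kernel.
 */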
static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 1000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.hwthread_req = 1;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}
static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
}
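
/*
 * Point a physical thread's PACA at the vcpu it is to run and, for
 * secondary threads, grab the hardware thread and kick it so that it
 * enters the guest.
 */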
static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		kvmppc_grab_hwthread(cpu);
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}
static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i = 0;

	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
}
/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;
	return 1;
}
/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static int kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid, i;

	/* don't start if any threads have a signal pending */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (signal_pending(vcpu->arch.run_task))
			return 0;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		return 0;		/* nothing to run */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_RUNNING;
	vc->stolen_tb += mftb() - vc->preempt_tb;
	vc->pcpu = smp_processor_id();
	vc->napping_threads = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		kvmppc_start_thread(vcpu);
		if (vcpu->arch.vpa.update_pending ||
		    vcpu->arch.slb_shadow.update_pending ||
		    vcpu->arch.dtl.update_pending)
			kvmppc_update_vpas(vcpu);
		kvmppc_create_dtl_entry(vcpu, vc);
	}
	/* Grab any remaining hw threads so they can't go into the kernel */
	for (i = ptid; i < threads_per_core; ++i)
		kvmppc_grab_hwthread(vc->pcpu + i);

	spin_unlock(&vc->lock);

	__kvmppc_vcore_entry(NULL, vcpu0);
	for (i = 0; i < threads_per_core; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();

	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
					 vcpu->arch.run_task);

		vcpu->arch.ret = ret;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

	spin_lock(&vc->lock);
 out:
	vc->vcore_state = VCORE_INACTIVE;
	vc->preempt_tb = mftb();
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}

	return 1;
}
/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}
/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);
	struct kvm_vcpu *v;
	int all_idle = 1;

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
		if (!v->arch.ceded || v->arch.pending_exceptions) {
			all_idle = 0;
			break;
		}
	}
	if (all_idle)
		schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}
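
/*
 * Run one vcpu: add it to its vcore's runnable list and either drive the
 * whole vcore from this task or sleep until another runner task has
 * executed this vcpu, returning once the vcpu needs service in the host.
 */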
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	int prev_state;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	prev_state = vcpu->arch.state;
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (prev_state == KVMPPC_VCPU_STOPPED) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_start_thread(vcpu);
		}

	} else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST)
		--vc->n_busy;

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		vc->runner = vcpu;
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
			n_ceded += v->arch.ceded;
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);

		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		vc->runner = NULL;
	}

	if (signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING ||
		    vc->vcore_state == VCORE_EXITING) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
			spin_lock(&vc->lock);
		}
		if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
			kvmppc_remove_runnable(vc, vcpu);
			vcpu->stat.signal_exits++;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			vcpu->arch.ret = -EINTR;
		}
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}
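
/*
 * Top-level entry from the KVM_RUN ioctl: set up the RMA/VRMA on first
 * use, flush the task's FP/Altivec/VSX state, then run the vcpu,
 * handling in-kernel hcalls until something needs to go to userspace.
 */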
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* On the first time here, set up VRMA or RMA */
	if (!vcpu->kvm->arch.rma_setup_done) {
		r = kvmppc_hv_setup_rma(vcpu);
		if (r)
			return r;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_prepare_to_enter(vcpu);
		}
	} while (r == RESUME_GUEST);

	return r;
}
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
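
/*
 * Preallocated RMA regions are handed to userspace as file descriptors
 * (see kvm_vm_ioctl_allocate_rma below); these vm_ops let userspace mmap
 * such a region so it can be supplied as guest memory at address 0.
 */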
static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_linear_info *ri = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= ri->npages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};
static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_linear_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static struct file_operations kvm_rma_fops = {
	.mmap		= kvm_rma_mmap,
	.release	= kvm_rma_release,
};
long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
	struct kvmppc_linear_info *ri;
	long fd;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
	if (fd < 0)
		kvm_release_rma(ri);

	ret->rma_size = ri->npages << PAGE_SHIFT;
	return fd;
}
static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
				     int linux_psize)
{
	struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];

	if (!def->shift)
		return;
	(*sps)->page_shift = def->shift;
	(*sps)->slb_enc = def->sllp;
	(*sps)->enc[0].page_shift = def->shift;
	(*sps)->enc[0].pte_enc = def->penc;
	(*sps)++;
}
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	struct kvm_ppc_one_seg_page_size *sps;

	info->flags = KVM_PPC_PAGE_SIZES_REAL;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		info->flags |= KVM_PPC_1T_SEGMENTS;
	info->slb_size = mmu_slb_size;

	/* We only support these sizes for now, and no multi-size segments */
	sps = &info->sps[0];
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);

	return 0;
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);
	memset(memslot->dirty_bitmap, 0, n);

	r = kvmppc_hv_get_dirty_log(kvm, memslot);
	if (r)
		goto out;

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
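
/*
 * Work out the SLB VSID "L"/"LP" encoding for a base page size, e.g.
 * 4kB -> 0, 64kB -> L|LP_01, 16MB -> L.
 */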
static unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
*kvm
,
1260 struct kvm_userspace_memory_region
*mem
)
1262 unsigned long npages
;
1263 unsigned long *phys
;
1265 /* Allocate a slot_phys array */
1266 phys
= kvm
->arch
.slot_phys
[mem
->slot
];
1267 if (!kvm
->arch
.using_mmu_notifiers
&& !phys
) {
1268 npages
= mem
->memory_size
>> PAGE_SHIFT
;
1269 phys
= vzalloc(npages
* sizeof(unsigned long));
1272 kvm
->arch
.slot_phys
[mem
->slot
] = phys
;
1273 kvm
->arch
.slot_npages
[mem
->slot
] = npages
;
1279 static void unpin_slot(struct kvm
*kvm
, int slot_id
)
1281 unsigned long *physp
;
1282 unsigned long j
, npages
, pfn
;
1285 physp
= kvm
->arch
.slot_phys
[slot_id
];
1286 npages
= kvm
->arch
.slot_npages
[slot_id
];
1288 spin_lock(&kvm
->arch
.slot_phys_lock
);
1289 for (j
= 0; j
< npages
; j
++) {
1290 if (!(physp
[j
] & KVMPPC_GOT_PAGE
))
1292 pfn
= physp
[j
] >> PAGE_SHIFT
;
1293 page
= pfn_to_page(pfn
);
1297 kvm
->arch
.slot_phys
[slot_id
] = NULL
;
1298 spin_unlock(&kvm
->arch
.slot_phys_lock
);
void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}
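
/*
 * First-run setup of the guest's real mode area: if the memory at guest
 * physical address 0 is one of our preallocated RMA regions, program
 * LPCR/RMOR (or HID4 on PPC970) to use it; otherwise, on POWER7, fall
 * back to a virtual RMA (VRMA) mapped through the hash page table.
 */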
static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_linear_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr, senc;
	unsigned long psize, porder;
	unsigned long rma_size;
	long rmls;
	unsigned long *physp;
	unsigned long i, npages;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Look up the memslot for guest physical address 0 */
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	/* Is this one of our preallocated RMAs? */
	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
	    hva == vma->vm_start)
		ri = vma->vm_file->private_data;

	up_read(&current->mm->mmap_sem);

	if (!ri) {
		/* On POWER7, use VRMA; on PPC970, give up */
		err = -EPERM;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("KVM: CPU requires an RMO\n");
			goto out;
		}

		/* We can handle 4k, 64k or 16M pages in the VRMA */
		err = -EINVAL;
		if (!(psize == 0x1000 || psize == 0x10000 ||
		      psize == 0x1000000))
			goto out;

		/* Update VRMASD field in the LPCR */
		senc = slb_pgsize_encoding(psize);
		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
		lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
		lpcr |= senc << (LPCR_VRMASD_SH - 4);
		kvm->arch.lpcr = lpcr;

		/* Create HPTEs in the hash page table for the VRMA */
		kvmppc_map_vrma(vcpu, memslot, porder);

	} else {
		/* Set up to use an RMO region */
		rma_size = ri->npages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
		rmls = lpcr_rmls(rma_size);
		err = -EINVAL;
		if (rmls < 0) {
			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
			goto out;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;

		/* Update LPCR and RMOR */
		lpcr = kvm->arch.lpcr;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr &= ~((1ul << HID4_RMLS0_SH) |
				  (3ul << HID4_RMLS2_SH));
			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
			lpcr |= rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
		}
		kvm->arch.lpcr = lpcr;
		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

		/* Initialize phys addrs of pages in RMO */
		npages = ri->npages;
		porder = __ilog2(npages);
		physp = kvm->arch.slot_phys[memslot->id];
		spin_lock(&kvm->arch.slot_phys_lock);
		for (i = 0; i < npages; ++i)
			physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
		spin_unlock(&kvm->arch.slot_phys_lock);
	}

	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = 1;
	err = 0;
 out:
	mutex_unlock(&kvm->lock);
	return err;

 up_out:
	up_read(&current->mm->mmap_sem);
	goto out;
}
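
/*
 * VM-level init: allocate the guest hashed page table and compute the
 * initial LPCR (or HID4 value on PPC970) for this guest's partition.
 */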
int kvmppc_core_init_vm(struct kvm *kvm)
{
	long r;
	unsigned long lpcr;

	/* Allocate hashed page table */
	r = kvmppc_alloc_hpt(kvm);
	if (r)
		return r;

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	kvm->arch.rma = NULL;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		unsigned long lpid = kvm->arch.lpid;
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VPM1;
		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
	}
	kvm->arch.lpcr = lpcr;

	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
	spin_lock_init(&kvm->arch.slot_phys_lock);
	return 0;
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	unsigned long i;

	if (!kvm->arch.using_mmu_notifiers)
		for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
			unpin_slot(kvm, i);

	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}
/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	return EMULATE_FAIL;
}
1520 r
= kvm_init(NULL
, sizeof(struct kvm_vcpu
), 0, THIS_MODULE
);
1525 r
= kvmppc_mmu_hv_init();
1530 static void kvmppc_book3s_hv_exit(void)
1535 module_init(kvmppc_book3s_hv_init
);
1536 module_exit(kvmppc_book3s_hv_exit
);