/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct kvmppc_vcore *vc = vcpu->arch.vcore;

        local_paca->kvm_hstate.kvm_vcpu = vcpu;
        local_paca->kvm_hstate.kvm_vcore = vc;
        if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
                vc->stolen_tb += mftb() - vc->preempt_tb;
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcore *vc = vcpu->arch.vcore;

        if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
                vc->preempt_tb = mftb();
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
        vcpu->arch.shregs.msr = msr;
        kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
        vcpu->arch.pvr = pvr;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
        int r;

        pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
        pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
               vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
        for (r = 0; r < 16; ++r)
                pr_err("r%2d = %.16lx r%d = %.16lx\n",
                       r, kvmppc_get_gpr(vcpu, r),
                       r+16, kvmppc_get_gpr(vcpu, r+16));
        pr_err("ctr = %.16lx lr = %.16lx\n",
               vcpu->arch.ctr, vcpu->arch.lr);
        pr_err("srr0 = %.16llx srr1 = %.16llx\n",
               vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
        pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
               vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
        pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
               vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
        pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
               vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
        pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
        pr_err("fault dar = %.16lx dsisr = %.8x\n",
               vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
        pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
        for (r = 0; r < vcpu->arch.slb_max; ++r)
                pr_err("  ESID = %.16llx VSID = %.16llx\n",
                       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
        pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
               vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
               vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
        int r;
        struct kvm_vcpu *v, *ret = NULL;

        mutex_lock(&kvm->lock);
        kvm_for_each_vcpu(r, v, kvm) {
                if (v->vcpu_id == id) {
                        ret = v;
                        break;
                }
        }
        mutex_unlock(&kvm->lock);
        return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
        vpa->shared_proc = 1;
        vpa->yield_count = 1;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
        u32 dummy;
        union {
                u16 hword;
                u32 word;
        } length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
        if (vpap->update_pending)
                return vpap->next_gpa != 0;
        return vpap->pinned_addr != NULL;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
                                       unsigned long flags,
                                       unsigned long vcpuid, unsigned long vpa)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long len, nb;
        void *va;
        struct kvm_vcpu *tvcpu;
        int err;
        int subfunc;
        struct kvmppc_vpa *vpap;

        tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
        if (!tvcpu)
                return H_PARAMETER;

        subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
        if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
            subfunc == H_VPA_REG_SLB) {
                /* Registering new area - address must be cache-line aligned */
                if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
                        return H_PARAMETER;

                /* convert logical addr to kernel addr and read length */
                va = kvmppc_pin_guest_page(kvm, vpa, &nb);
                if (va == NULL)
                        return H_PARAMETER;
                if (subfunc == H_VPA_REG_VPA)
                        len = ((struct reg_vpa *)va)->length.hword;
                else
                        len = ((struct reg_vpa *)va)->length.word;
                kvmppc_unpin_guest_page(kvm, va);

                /* Check length */
                if (len > nb || len < sizeof(struct reg_vpa))
                        return H_PARAMETER;
        } else {
                vpa = 0;
                len = 0;
        }

        err = H_PARAMETER;
        vpap = NULL;
        spin_lock(&tvcpu->arch.vpa_update_lock);

        switch (subfunc) {
        case H_VPA_REG_VPA:             /* register VPA */
                if (len < sizeof(struct lppaca))
                        break;
                vpap = &tvcpu->arch.vpa;
                err = 0;
                break;

        case H_VPA_REG_DTL:             /* register DTL */
                if (len < sizeof(struct dtl_entry))
                        break;
                len -= len % sizeof(struct dtl_entry);

                /* Check that they have previously registered a VPA */
                err = H_RESOURCE;
                if (!vpa_is_registered(&tvcpu->arch.vpa))
                        break;

                vpap = &tvcpu->arch.dtl;
                err = 0;
                break;

        case H_VPA_REG_SLB:             /* register SLB shadow buffer */
                /* Check that they have previously registered a VPA */
                err = H_RESOURCE;
                if (!vpa_is_registered(&tvcpu->arch.vpa))
                        break;

                vpap = &tvcpu->arch.slb_shadow;
                err = 0;
                break;

        case H_VPA_DEREG_VPA:           /* deregister VPA */
                /* Check they don't still have a DTL or SLB buf registered */
                err = H_RESOURCE;
                if (vpa_is_registered(&tvcpu->arch.dtl) ||
                    vpa_is_registered(&tvcpu->arch.slb_shadow))
                        break;

                vpap = &tvcpu->arch.vpa;
                err = 0;
                break;

        case H_VPA_DEREG_DTL:           /* deregister DTL */
                vpap = &tvcpu->arch.dtl;
                err = 0;
                break;

        case H_VPA_DEREG_SLB:           /* deregister SLB shadow buffer */
                vpap = &tvcpu->arch.slb_shadow;
                err = 0;
                break;
        }

        if (vpap) {
                vpap->next_gpa = vpa;
                vpap->len = len;
                vpap->update_pending = 1;
        }

        spin_unlock(&tvcpu->arch.vpa_update_lock);

        return err;
}

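/*
 * Illustrative guest-side counterpart (not part of this file): a pseries
 * guest registers its VPA for a given vcpu with something like
 *
 *	flags = H_VPA_REG_VPA << H_VPA_FUNC_SHIFT;
 *	plpar_hcall_norets(H_REGISTER_VPA, flags, vcpu_id, __pa(vpa));
 *
 * which lands in do_h_register_vpa() above.  The flag encoding shown here
 * is an assumption derived from the subfunc decoding at the top of that
 * function; see the guest's own VPA registration code for the real call.
 */
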
static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
{
        void *va;
        unsigned long nb;

        vpap->update_pending = 0;
        va = NULL;
        if (vpap->next_gpa) {
                va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
                if (va && nb < vpap->len) {
                        /*
                         * If it's now too short, it must be that userspace
                         * has changed the mappings underlying guest memory,
                         * so unregister the region.
                         */
                        kvmppc_unpin_guest_page(kvm, va);
                        va = NULL;
                }
        }
        if (vpap->pinned_addr)
                kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
        vpap->pinned_addr = va;
        if (va)
                vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;

        spin_lock(&vcpu->arch.vpa_update_lock);
        if (vcpu->arch.vpa.update_pending) {
                kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
                /* pinning the new area can fail, so don't init a NULL VPA */
                if (vcpu->arch.vpa.pinned_addr)
                        init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
        }
        if (vcpu->arch.dtl.update_pending) {
                kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
                vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
                vcpu->arch.dtl_index = 0;
        }
        if (vcpu->arch.slb_shadow.update_pending)
                kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
        spin_unlock(&vcpu->arch.vpa_update_lock);
}

static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
                                    struct kvmppc_vcore *vc)
{
        struct dtl_entry *dt;
        struct lppaca *vpa;
        unsigned long old_stolen;

        dt = vcpu->arch.dtl_ptr;
        vpa = vcpu->arch.vpa.pinned_addr;
        old_stolen = vcpu->arch.stolen_logged;
        vcpu->arch.stolen_logged = vc->stolen_tb;
        if (!dt || !vpa)
                return;
        memset(dt, 0, sizeof(struct dtl_entry));
        dt->dispatch_reason = 7;
        dt->processor_id = vc->pcpu + vcpu->arch.ptid;
        dt->timebase = mftb();
        dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen;
        dt->srr0 = kvmppc_get_pc(vcpu);
        dt->srr1 = vcpu->arch.shregs.msr;
        ++dt;
        if (dt == vcpu->arch.dtl.pinned_end)
                dt = vcpu->arch.dtl.pinned_addr;
        vcpu->arch.dtl_ptr = dt;
        /* order writing *dt vs. writing vpa->dtl_idx */
        smp_wmb();
        vpa->dtl_idx = ++vcpu->arch.dtl_index;
}

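/*
 * Note on the ordering above: the guest reads vpa->dtl_idx and then the
 * DTL entries it indexes, so the smp_wmb() ensures a fully written entry
 * is visible before the incremented index that advertises it.  dtl_ptr
 * wraps back to pinned_addr, i.e. the dispatch trace log is a simple ring.
 */
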
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
        unsigned long req = kvmppc_get_gpr(vcpu, 3);
        unsigned long target, ret = H_SUCCESS;
        struct kvm_vcpu *tvcpu;

        switch (req) {
        case H_ENTER:
                ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
                                              kvmppc_get_gpr(vcpu, 5),
                                              kvmppc_get_gpr(vcpu, 6),
                                              kvmppc_get_gpr(vcpu, 7));
                break;
        case H_CEDE:
                break;
        case H_PROD:
                target = kvmppc_get_gpr(vcpu, 4);
                tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
                if (!tvcpu) {
                        ret = H_PARAMETER;
                        break;
                }
                tvcpu->arch.prodded = 1;
                smp_mb();
                if (vcpu->arch.ceded) {
                        if (waitqueue_active(&vcpu->wq)) {
                                wake_up_interruptible(&vcpu->wq);
                                vcpu->stat.halt_wakeup++;
                        }
                }
                break;
        case H_CONFER:
                break;
        case H_REGISTER_VPA:
                ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
                                        kvmppc_get_gpr(vcpu, 5),
                                        kvmppc_get_gpr(vcpu, 6));
                break;
        default:
                return RESUME_HOST;
        }
        kvmppc_set_gpr(vcpu, 3, ret);
        vcpu->arch.hcall_needed = 0;
        return RESUME_GUEST;
}

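/*
 * Illustrative register convention (PAPR): the guest passes the hcall
 * number in r3 and arguments in r4 onwards, so an H_PROD of vcpu 2
 * arrives here with kvmppc_get_gpr(vcpu, 3) == H_PROD and
 * kvmppc_get_gpr(vcpu, 4) == 2; the return code goes back in r3.
 */
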
static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              struct task_struct *tsk)
{
        int r = RESUME_HOST;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;
        switch (vcpu->arch.trap) {
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        {
                ulong flags;
                /*
                 * Normally program interrupts are delivered directly
                 * to the guest by the hardware, but we can get here
                 * as a result of a hypervisor emulation interrupt
                 * (e40) getting turned into a 700 by BML RTAS.
                 */
                flags = vcpu->arch.shregs.msr & 0x1f0000ull;
                kvmppc_core_queue_program(vcpu, flags);
                r = RESUME_GUEST;
                break;
        }
        case BOOK3S_INTERRUPT_SYSCALL:
        {
                /* hcall - punt to userspace */
                int i;

                if (vcpu->arch.shregs.msr & MSR_PR) {
                        /* sc 1 from userspace - reflect to guest syscall */
                        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
                        r = RESUME_GUEST;
                        break;
                }
                run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
                for (i = 0; i < 9; ++i)
                        run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
                run->exit_reason = KVM_EXIT_PAPR_HCALL;
                vcpu->arch.hcall_needed = 1;
                r = RESUME_HOST;
                break;
        }
        /*
         * We get these next two if the guest accesses a page which it thinks
         * it has mapped but which is not actually present, either because
         * it is for an emulated I/O device or because the corresponding
         * host page has been paged out.  Any other HDSI/HISI interrupts
         * have been handled already.
         */
        case BOOK3S_INTERRUPT_H_DATA_STORAGE:
                r = kvmppc_book3s_hv_page_fault(run, vcpu,
                                vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
                break;
        case BOOK3S_INTERRUPT_H_INST_STORAGE:
                r = kvmppc_book3s_hv_page_fault(run, vcpu,
                                kvmppc_get_pc(vcpu), 0);
                break;
        /*
         * This occurs if the guest executes an illegal instruction.
         * We just generate a program interrupt to the guest, since
         * we don't emulate any guest instructions at this stage.
         */
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
                kvmppc_core_queue_program(vcpu, 0x80000);
                r = RESUME_GUEST;
                break;
        default:
                kvmppc_dump_regs(vcpu);
                printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
                       vcpu->arch.trap, kvmppc_get_pc(vcpu),
                       vcpu->arch.shregs.msr);
                r = RESUME_HOST;
                BUG();
                break;
        }

        return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int i;

        /* clear the whole struct before filling it in */
        memset(sregs, 0, sizeof(struct kvm_sregs));
        sregs->pvr = vcpu->arch.pvr;
        for (i = 0; i < vcpu->arch.slb_max; i++) {
                sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
                sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
        }

        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int i, j;

        kvmppc_set_pvr(vcpu, sregs->pvr);

        j = 0;
        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
                        vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
                        vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
                        ++j;
                }
        }
        vcpu->arch.slb_max = j;

        return 0;
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_PPC_HIOR:
                r = put_user(0, (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_PPC_HIOR:
        {
                u64 hior;
                /* Only allow this to be set to zero */
                r = get_user(hior, (u64 __user *)reg->addr);
                if (!r && (hior != 0))
                        r = -EINVAL;
                break;
        }
        default:
                break;
        }

        return r;
}

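/*
 * Illustrative userspace usage (not part of this file): a QEMU-style VMM
 * would read HIOR through the ONE_REG interface roughly as
 *
 *	u64 hior;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_PPC_HIOR,
 *		.addr = (__u64)&hior,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * On HV KVM this always reads back 0, and setting any non-zero value
 * fails with -EINVAL, as implemented above.
 */
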
int kvmppc_core_check_processor_compat(void)
{
        if (cpu_has_feature(CPU_FTR_HVMODE))
                return 0;
        return -EIO;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int err = -EINVAL;
        int core;
        struct kvmppc_vcore *vcore;

        core = id / threads_per_core;
        if (core >= KVM_MAX_VCORES)
                goto out;

        err = -ENOMEM;
        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_vcpu;

        vcpu->arch.shared = &vcpu->arch.shregs;
        vcpu->arch.last_cpu = -1;
        vcpu->arch.mmcr[0] = MMCR0_FC;
        vcpu->arch.ctrl = CTRL_RUNLATCH;
        /* default to host PVR, since we can't spoof it */
        vcpu->arch.pvr = mfspr(SPRN_PVR);
        kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
        spin_lock_init(&vcpu->arch.vpa_update_lock);

        kvmppc_mmu_book3s_hv_init(vcpu);

        /*
         * We consider the vcpu stopped until we see the first run ioctl for it.
         */
        vcpu->arch.state = KVMPPC_VCPU_STOPPED;

        init_waitqueue_head(&vcpu->arch.cpu_run);

        mutex_lock(&kvm->lock);
        vcore = kvm->arch.vcores[core];
        if (!vcore) {
                vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
                if (vcore) {
                        INIT_LIST_HEAD(&vcore->runnable_threads);
                        spin_lock_init(&vcore->lock);
                        init_waitqueue_head(&vcore->wq);
                        vcore->preempt_tb = mftb();
                }
                kvm->arch.vcores[core] = vcore;
        }
        mutex_unlock(&kvm->lock);

        if (!vcore)
                goto free_vcpu;

        spin_lock(&vcore->lock);
        ++vcore->num_threads;
        spin_unlock(&vcore->lock);
        vcpu->arch.vcore = vcore;
        vcpu->arch.stolen_logged = vcore->stolen_tb;

        vcpu->arch.cpu_type = KVM_CPU_3S_64;
        kvmppc_sanity_check(vcpu);

        return vcpu;

free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
        spin_lock(&vcpu->arch.vpa_update_lock);
        if (vcpu->arch.dtl.pinned_addr)
                kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
        if (vcpu->arch.slb_shadow.pinned_addr)
                kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
        if (vcpu->arch.vpa.pinned_addr)
                kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
        spin_unlock(&vcpu->arch.vpa_update_lock);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
        unsigned long dec_nsec, now;

        now = get_tb();
        if (now > vcpu->arch.dec_expires) {
                /* decrementer has already gone negative */
                kvmppc_core_queue_dec(vcpu);
                kvmppc_core_prepare_to_enter(vcpu);
                return;
        }
        dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
                   / tb_ticks_per_sec;
        hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
                      HRTIMER_MODE_REL);
        vcpu->arch.timer_running = 1;
}

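/*
 * Worked example for the conversion above, assuming the common 512 MHz
 * timebase (tb_ticks_per_sec = 512000000): a delta of 1024 timebase
 * ticks becomes 1024 * 1000000000 / 512000000 = 2000 ns of hrtimer delay.
 */
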
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
        vcpu->arch.ceded = 0;
        if (vcpu->arch.timer_running) {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                vcpu->arch.timer_running = 0;
        }
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
                                   struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu *v;

        if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
                return;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
        --vc->n_runnable;
        ++vc->n_busy;
        /* decrement the physical thread id of each following vcpu */
        v = vcpu;
        list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
                --v->arch.ptid;
        list_del(&vcpu->arch.run_list);
}

static int kvmppc_grab_hwthread(int cpu)
{
        struct paca_struct *tpaca;
        long timeout = 1000;

        tpaca = &paca[cpu];

        /* Ensure the thread won't go into the kernel if it wakes */
        tpaca->kvm_hstate.hwthread_req = 1;

        /*
         * If the thread is already executing in the kernel (e.g. handling
         * a stray interrupt), wait for it to get back to nap mode.
         * The smp_mb() is to ensure that our setting of hwthread_req
         * is visible before we look at hwthread_state, so if this
         * races with the code at system_reset_pSeries and the thread
         * misses our setting of hwthread_req, we are sure to see its
         * setting of hwthread_state, and vice versa.
         */
        smp_mb();
        while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
                if (--timeout <= 0) {
                        pr_err("KVM: couldn't grab cpu %d\n", cpu);
                        return -EBUSY;
                }
                udelay(1);
        }
        return 0;
}

static void kvmppc_release_hwthread(int cpu)
{
        struct paca_struct *tpaca;

        tpaca = &paca[cpu];
        tpaca->kvm_hstate.hwthread_req = 0;
        tpaca->kvm_hstate.kvm_vcpu = NULL;
}

static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
        int cpu;
        struct paca_struct *tpaca;
        struct kvmppc_vcore *vc = vcpu->arch.vcore;

        if (vcpu->arch.timer_running) {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                vcpu->arch.timer_running = 0;
        }
        cpu = vc->pcpu + vcpu->arch.ptid;
        tpaca = &paca[cpu];
        tpaca->kvm_hstate.kvm_vcpu = vcpu;
        tpaca->kvm_hstate.kvm_vcore = vc;
        tpaca->kvm_hstate.napping = 0;
        vcpu->cpu = vc->pcpu;
        smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
        if (vcpu->arch.ptid) {
                kvmppc_grab_hwthread(cpu);
                xics_wake_cpu(cpu);
                ++vc->n_woken;
        }
#endif
}

static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
        int i;

        HMT_low();
        i = 0;
        while (vc->nap_count < vc->n_woken) {
                if (++i >= 1000000) {
                        pr_err("kvmppc_wait_for_nap timeout %d %d\n",
                               vc->nap_count, vc->n_woken);
                        break;
                }
                cpu_relax();
        }
        HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.
 */
static int on_primary_thread(void)
{
        int cpu = smp_processor_id();
        int thr = cpu_thread_in_core(cpu);

        if (thr)
                return 0;
        while (++thr < threads_per_core)
                if (cpu_online(cpu + thr))
                        return 0;
        return 1;
}

/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static int kvmppc_run_core(struct kvmppc_vcore *vc)
{
        struct kvm_vcpu *vcpu, *vcpu0, *vnext;
        long ret;
        u64 now;
        int ptid, i;

        /* don't start if any threads have a signal pending */
        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
                if (signal_pending(vcpu->arch.run_task))
                        return 0;

        /*
         * Make sure we are running on thread 0, and that
         * secondary threads are offline.
         * XXX we should also block attempts to bring any
         * secondary threads online.
         */
        if (threads_per_core > 1 && !on_primary_thread()) {
                list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
                        vcpu->arch.ret = -EBUSY;
                goto out;
        }

        /*
         * Assign physical thread IDs, first to non-ceded vcpus
         * and then to ceded ones.
         */
        ptid = 0;
        vcpu0 = NULL;
        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                if (!vcpu->arch.ceded) {
                        if (!ptid)
                                vcpu0 = vcpu;
                        vcpu->arch.ptid = ptid++;
                }
        }
        if (!vcpu0)
                return 0;               /* nothing to run */
        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
                if (vcpu->arch.ceded)
                        vcpu->arch.ptid = ptid++;

        vc->n_woken = 0;
        vc->nap_count = 0;
        vc->entry_exit_count = 0;
        vc->vcore_state = VCORE_RUNNING;
        vc->stolen_tb += mftb() - vc->preempt_tb;
        vc->in_guest = 0;
        vc->pcpu = smp_processor_id();
        vc->napping_threads = 0;
        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                kvmppc_start_thread(vcpu);
                if (vcpu->arch.vpa.update_pending ||
                    vcpu->arch.slb_shadow.update_pending ||
                    vcpu->arch.dtl.update_pending)
                        kvmppc_update_vpas(vcpu);
                kvmppc_create_dtl_entry(vcpu, vc);
        }
        /* Grab any remaining hw threads so they can't go into the kernel */
        for (i = ptid; i < threads_per_core; ++i)
                kvmppc_grab_hwthread(vc->pcpu + i);

        preempt_disable();
        spin_unlock(&vc->lock);

        kvm_guest_enter();
        __kvmppc_vcore_entry(NULL, vcpu0);
        for (i = 0; i < threads_per_core; ++i)
                kvmppc_release_hwthread(vc->pcpu + i);

        spin_lock(&vc->lock);
        /* disable sending of IPIs on virtual external irqs */
        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
                vcpu->cpu = -1;
        /* wait for secondary threads to finish writing their state to memory */
        if (vc->nap_count < vc->n_woken)
                kvmppc_wait_for_nap(vc);
        /* prevent other vcpu threads from doing kvmppc_start_thread() now */
        vc->vcore_state = VCORE_EXITING;
        spin_unlock(&vc->lock);

        /* make sure updates to secondary vcpu structs are visible now */
        smp_mb();
        kvm_guest_exit();

        preempt_enable();
        kvm_resched(vcpu);

        now = get_tb();
        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                /* cancel pending dec exception if dec is positive */
                if (now < vcpu->arch.dec_expires &&
                    kvmppc_core_pending_dec(vcpu))
                        kvmppc_core_dequeue_dec(vcpu);

                ret = RESUME_GUEST;
                if (vcpu->arch.trap)
                        ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
                                                 vcpu->arch.run_task);

                vcpu->arch.ret = ret;
                vcpu->arch.trap = 0;

                if (vcpu->arch.ceded) {
                        if (ret != RESUME_GUEST)
                                kvmppc_end_cede(vcpu);
                        else
                                kvmppc_set_timer(vcpu);
                }
        }

        spin_lock(&vc->lock);
out:
        vc->vcore_state = VCORE_INACTIVE;
        vc->preempt_tb = mftb();
        list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
                                 arch.run_list) {
                if (vcpu->arch.ret != RESUME_GUEST) {
                        kvmppc_remove_runnable(vc, vcpu);
                        wake_up(&vcpu->arch.cpu_run);
                }
        }

        return 1;
}

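/*
 * Worked example of the ptid assignment in kvmppc_run_core() (illustrative):
 * with four runnable vcpus A, B, C, D where B and D have ceded, the first
 * pass gives A ptid 0 (so A becomes vcpu0 and runs on the primary hardware
 * thread) and C ptid 1; the second pass then gives B ptid 2 and D ptid 3.
 */
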
/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
        DEFINE_WAIT(wait);

        prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
        if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
                schedule();
        finish_wait(&vcpu->arch.cpu_run, &wait);
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
        DEFINE_WAIT(wait);
        struct kvm_vcpu *v;
        int all_idle = 1;

        prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
        vc->vcore_state = VCORE_SLEEPING;
        spin_unlock(&vc->lock);
        list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
                if (!v->arch.ceded || v->arch.pending_exceptions) {
                        all_idle = 0;
                        break;
                }
        }
        if (all_idle)
                schedule();
        finish_wait(&vc->wq, &wait);
        spin_lock(&vc->lock);
        vc->vcore_state = VCORE_INACTIVE;
}

static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int n_ceded;
        int prev_state;
        struct kvmppc_vcore *vc;
        struct kvm_vcpu *v, *vn;

        kvm_run->exit_reason = 0;
        vcpu->arch.ret = RESUME_GUEST;
        vcpu->arch.trap = 0;

        /*
         * Synchronize with other threads in this virtual core
         */
        vc = vcpu->arch.vcore;
        spin_lock(&vc->lock);
        vcpu->arch.ceded = 0;
        vcpu->arch.run_task = current;
        vcpu->arch.kvm_run = kvm_run;
        prev_state = vcpu->arch.state;
        vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
        list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
        ++vc->n_runnable;

        /*
         * This happens the first time this is called for a vcpu.
         * If the vcore is already running, we may be able to start
         * this thread straight away and have it join in.
         */
        if (prev_state == KVMPPC_VCPU_STOPPED) {
                if (vc->vcore_state == VCORE_RUNNING &&
                    VCORE_EXIT_COUNT(vc) == 0) {
                        vcpu->arch.ptid = vc->n_runnable - 1;
                        kvmppc_start_thread(vcpu);
                }

        } else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST)
                --vc->n_busy;

        while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
               !signal_pending(current)) {
                if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) {
                        spin_unlock(&vc->lock);
                        kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
                        spin_lock(&vc->lock);
                        continue;
                }
                vc->runner = vcpu;
                n_ceded = 0;
                list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
                        n_ceded += v->arch.ceded;
                if (n_ceded == vc->n_runnable)
                        kvmppc_vcore_blocked(vc);
                else
                        kvmppc_run_core(vc);

                list_for_each_entry_safe(v, vn, &vc->runnable_threads,
                                         arch.run_list) {
                        kvmppc_core_prepare_to_enter(v);
                        if (signal_pending(v->arch.run_task)) {
                                kvmppc_remove_runnable(vc, v);
                                v->stat.signal_exits++;
                                v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
                                v->arch.ret = -EINTR;
                                wake_up(&v->arch.cpu_run);
                        }
                }
                vc->runner = NULL;
        }

        if (signal_pending(current)) {
                if (vc->vcore_state == VCORE_RUNNING ||
                    vc->vcore_state == VCORE_EXITING) {
                        spin_unlock(&vc->lock);
                        kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
                        spin_lock(&vc->lock);
                }
                if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
                        kvmppc_remove_runnable(vc, vcpu);
                        vcpu->stat.signal_exits++;
                        kvm_run->exit_reason = KVM_EXIT_INTR;
                        vcpu->arch.ret = -EINTR;
                }
        }

        spin_unlock(&vc->lock);
        return vcpu->arch.ret;
}

int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        int r;

        if (!vcpu->arch.sane) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return -EINVAL;
        }

        kvmppc_core_prepare_to_enter(vcpu);

        /* No need to go into the guest when all we'll do is come back out */
        if (signal_pending(current)) {
                run->exit_reason = KVM_EXIT_INTR;
                return -EINTR;
        }

        /* On the first time here, set up VRMA or RMA */
        if (!vcpu->kvm->arch.rma_setup_done) {
                r = kvmppc_hv_setup_rma(vcpu);
                if (r)
                        return r;
        }

        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
        flush_vsx_to_thread(current);
        vcpu->arch.wqp = &vcpu->arch.vcore->wq;
        vcpu->arch.pgdir = current->mm->pgd;

        do {
                r = kvmppc_run_vcpu(run, vcpu);

                if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
                    !(vcpu->arch.shregs.msr & MSR_PR)) {
                        r = kvmppc_pseries_do_hcall(vcpu);
                        kvmppc_core_prepare_to_enter(vcpu);
                }
        } while (r == RESUME_GUEST);
        return r;
}

/*
 * Work out RMLS (real mode limit selector) field value for a given RMA size.
 * Assumes POWER7 or PPC970.
 */
static inline int lpcr_rmls(unsigned long rma_size)
{
        switch (rma_size) {
        case 32ul << 20:        /* 32 MB */
                if (cpu_has_feature(CPU_FTR_ARCH_206))
                        return 8;       /* only supported on POWER7 */
                return -1;
        case 64ul << 20:        /* 64 MB */
                return 3;
        case 128ul << 20:       /* 128 MB */
                return 7;
        case 256ul << 20:       /* 256 MB */
                return 4;
        case 1ul << 30:         /* 1 GB */
                return 2;
        case 16ul << 30:        /* 16 GB */
                return 1;
        case 256ul << 30:       /* 256 GB */
                return 0;
        default:
                return -1;
        }
}

static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvmppc_linear_info *ri = vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= ri->npages)
                return VM_FAULT_SIGBUS;

        page = pfn_to_page(ri->base_pfn + vmf->pgoff);
        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
        .fault = kvm_rma_fault,
};

static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_flags |= VM_RESERVED;
        vma->vm_ops = &kvm_rma_vm_ops;
        return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_linear_info *ri = filp->private_data;

        kvm_release_rma(ri);
        return 0;
}

static struct file_operations kvm_rma_fops = {
        .mmap           = kvm_rma_mmap,
        .release        = kvm_rma_release,
};

long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
        struct kvmppc_linear_info *ri;
        long fd;

        ri = kvm_alloc_rma();
        if (!ri)
                return -ENOMEM;

        fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
        if (fd < 0)
                kvm_release_rma(ri);

        ret->rma_size = ri->npages << PAGE_SHIFT;
        return fd;
}

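/*
 * Illustrative userspace usage (not part of this file): a QEMU-style VMM
 * would allocate and map an RMA roughly as
 *
 *	struct kvm_allocate_rma rma;
 *	int fd = ioctl(kvm_vm_fd, KVM_ALLOCATE_RMA, &rma);
 *	void *rma_va = mmap(NULL, rma.rma_size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, 0);
 *
 * and then register rma_va as guest memory at guest physical address 0,
 * which is where kvmppc_hv_setup_rma() below expects to find it.
 */
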
static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
                                     int linux_psize)
{
        struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];

        if (!def->shift)
                return;
        (*sps)->page_shift = def->shift;
        (*sps)->slb_enc = def->sllp;
        (*sps)->enc[0].page_shift = def->shift;
        (*sps)->enc[0].pte_enc = def->penc;
        (*sps)++;
}

int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
        struct kvm_ppc_one_seg_page_size *sps;

        info->flags = KVM_PPC_PAGE_SIZES_REAL;
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                info->flags |= KVM_PPC_1T_SEGMENTS;
        info->slb_size = mmu_slb_size;

        /* We only support these sizes for now, and no multi-size segments */
        sps = &info->sps[0];
        kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
        kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
        kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);

        return 0;
}

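/*
 * For example (illustrative, using typical Book3S-64 mmu_psize_defs
 * values): the MMU_PAGE_4K entry comes out as page_shift = 12 with
 * slb_enc = 0, and the MMU_PAGE_16M entry as page_shift = 24 with
 * SLB_VSID_L set in slb_enc.  The actual values come from the host's
 * mmu_psize_defs table.
 */
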
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        int r;
        unsigned long n;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = kvm_dirty_bitmap_bytes(memslot);
        memset(memslot->dirty_bitmap, 0, n);

        r = kvmppc_hv_get_dirty_log(kvm, memslot);
        if (r)
                goto out;

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static unsigned long slb_pgsize_encoding(unsigned long psize)
{
        unsigned long senc = 0;

        if (psize > 0x1000) {
                senc = SLB_VSID_L;
                if (psize == 0x10000)
                        senc |= SLB_VSID_LP_01;
        }
        return senc;
}

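/*
 * For example: a 4k base page size (0x1000) encodes to 0, 64k (0x10000)
 * encodes to SLB_VSID_L | SLB_VSID_LP_01, and 16M (0x1000000) encodes to
 * SLB_VSID_L alone.
 */
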
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
        unsigned long npages;
        unsigned long *phys;

        /* Allocate a slot_phys array */
        phys = kvm->arch.slot_phys[mem->slot];
        if (!kvm->arch.using_mmu_notifiers && !phys) {
                npages = mem->memory_size >> PAGE_SHIFT;
                phys = vzalloc(npages * sizeof(unsigned long));
                if (!phys)
                        return -ENOMEM;
                kvm->arch.slot_phys[mem->slot] = phys;
                kvm->arch.slot_npages[mem->slot] = npages;
        }

        return 0;
}

static void unpin_slot(struct kvm *kvm, int slot_id)
{
        unsigned long *physp;
        unsigned long j, npages, pfn;
        struct page *page;

        physp = kvm->arch.slot_phys[slot_id];
        npages = kvm->arch.slot_npages[slot_id];
        if (physp) {
                spin_lock(&kvm->arch.slot_phys_lock);
                for (j = 0; j < npages; j++) {
                        if (!(physp[j] & KVMPPC_GOT_PAGE))
                                continue;
                        pfn = physp[j] >> PAGE_SHIFT;
                        page = pfn_to_page(pfn);
                        SetPageDirty(page);
                        put_page(page);
                }
                kvm->arch.slot_phys[slot_id] = NULL;
                spin_unlock(&kvm->arch.slot_phys_lock);
                vfree(physp);
        }
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
}

static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
{
        int err = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvmppc_linear_info *ri = NULL;
        unsigned long hva;
        struct kvm_memory_slot *memslot;
        struct vm_area_struct *vma;
        unsigned long lpcr, senc;
        unsigned long psize, porder;
        unsigned long rma_size;
        long rmls;              /* signed: lpcr_rmls() returns -1 on failure */
        unsigned long *physp;
        unsigned long i, npages;

        mutex_lock(&kvm->lock);
        if (kvm->arch.rma_setup_done)
                goto out;       /* another vcpu beat us to it */

        /* Look up the memslot for guest physical address 0 */
        memslot = gfn_to_memslot(kvm, 0);

        /* We must have some memory at 0 by now */
        err = -EINVAL;
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
                goto out;

        /* Look up the VMA for the start of this memory slot */
        hva = memslot->userspace_addr;
        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, hva);
        if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
                goto up_out;

        psize = vma_kernel_pagesize(vma);
        porder = __ilog2(psize);

        /* Is this one of our preallocated RMAs? */
        if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
            hva == vma->vm_start)
                ri = vma->vm_file->private_data;

        up_read(&current->mm->mmap_sem);

        if (!ri) {
                /* On POWER7, use VRMA; on PPC970, give up */
                err = -EPERM;
                if (cpu_has_feature(CPU_FTR_ARCH_201)) {
                        pr_err("KVM: CPU requires an RMO\n");
                        goto out;
                }

                /* We can handle 4k, 64k or 16M pages in the VRMA */
                err = -EINVAL;
                if (!(psize == 0x1000 || psize == 0x10000 ||
                      psize == 0x1000000))
                        goto out;

                /* Update VRMASD field in the LPCR */
                senc = slb_pgsize_encoding(psize);
                kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
                        (VRMA_VSID << SLB_VSID_SHIFT_1T);
                lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
                lpcr |= senc << (LPCR_VRMASD_SH - 4);
                kvm->arch.lpcr = lpcr;

                /* Create HPTEs in the hash page table for the VRMA */
                kvmppc_map_vrma(vcpu, memslot, porder);

        } else {
                /* Set up to use an RMO region */
                rma_size = ri->npages;
                if (rma_size > memslot->npages)
                        rma_size = memslot->npages;
                rma_size <<= PAGE_SHIFT;
                rmls = lpcr_rmls(rma_size);
                err = -EINVAL;
                if (rmls < 0) {
                        pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
                        goto out;
                }
                atomic_inc(&ri->use_count);
                kvm->arch.rma = ri;

                /* Update LPCR and RMOR */
                lpcr = kvm->arch.lpcr;
                if (cpu_has_feature(CPU_FTR_ARCH_201)) {
                        /* PPC970; insert RMLS value (split field) in HID4 */
                        lpcr &= ~((1ul << HID4_RMLS0_SH) |
                                  (3ul << HID4_RMLS2_SH));
                        lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
                                ((rmls & 3) << HID4_RMLS2_SH);
                        /* RMOR is also in HID4 */
                        lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
                                << HID4_RMOR_SH;
                } else {
                        /* POWER7 */
                        lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
                        lpcr |= rmls << LPCR_RMLS_SH;
                        kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
                }
                kvm->arch.lpcr = lpcr;
                pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
                        ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

                /* Initialize phys addrs of pages in RMO */
                npages = ri->npages;
                porder = __ilog2(npages);
                physp = kvm->arch.slot_phys[memslot->id];
                spin_lock(&kvm->arch.slot_phys_lock);
                for (i = 0; i < npages; ++i)
                        physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
                spin_unlock(&kvm->arch.slot_phys_lock);
        }

        /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
        smp_wmb();
        kvm->arch.rma_setup_done = 1;
        err = 0;
out:
        mutex_unlock(&kvm->lock);
        return err;

up_out:
        up_read(&current->mm->mmap_sem);
        goto out;
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
        long r;
        unsigned long lpcr;

        /* Allocate hashed page table */
        r = kvmppc_alloc_hpt(kvm);
        if (r)
                return r;

        INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

        kvm->arch.rma = NULL;

        kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

        if (cpu_has_feature(CPU_FTR_ARCH_201)) {
                /* PPC970; HID4 is effectively the LPCR */
                unsigned long lpid = kvm->arch.lpid;
                kvm->arch.host_lpid = 0;
                kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
                lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
                lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
                        ((lpid & 0xf) << HID4_LPID5_SH);
        } else {
                /* POWER7; init LPCR for virtual RMA mode */
                kvm->arch.host_lpid = mfspr(SPRN_LPID);
                kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
                lpcr &= LPCR_PECE | LPCR_LPES;
                lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
                        LPCR_VPM0 | LPCR_VPM1;
                kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
                        (VRMA_VSID << SLB_VSID_SHIFT_1T);
        }
        kvm->arch.lpcr = lpcr;

        kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
        spin_lock_init(&kvm->arch.slot_phys_lock);
        return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
        unsigned long i;

        if (!kvm->arch.using_mmu_notifiers)
                for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
                        unpin_slot(kvm, i);

        if (kvm->arch.rma) {
                kvm_release_rma(kvm->arch.rma);
                kvm->arch.rma = NULL;
        }

        kvmppc_free_hpt(kvm);
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}

/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                           unsigned int inst, int *advance)
{
        return EMULATE_FAIL;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
        return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
        return EMULATE_FAIL;
}

static int kvmppc_book3s_hv_init(void)
{
        int r;

        r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

        if (r)
                return r;

        r = kvmppc_mmu_hv_init();

        return r;
}

static void kvmppc_book3s_hv_exit(void)
{
        kvm_exit();
}

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);