/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"
unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "dcr",         VCPU_STAT(dcr_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "itlb_r",      VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",      VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",      VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",      VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "isi",         VCPU_STAT(isi_exits) },
	{ "dsi",         VCPU_STAT(dsi_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};
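
/*
 * These counters are picked up by KVM's generic debugfs statistics code;
 * with debugfs mounted they normally show up on the host under
 * /sys/kernel/debug/kvm/.
 */
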
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}
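
/*
 * pending_exceptions is a bitmap indexed by the BOOKE_IRQPRIO_* values.
 * Delivery scans it lowest bit first, so numerically smaller priorities win.
 */
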
static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}
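
/*
 * KVM_INTERRUPT_SET_LEVEL requests level-triggered semantics: the interrupt
 * is delivered like a normal external interrupt, but its pending bit is not
 * cleared on delivery, so it stays asserted until userspace dequeues it.
 */
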
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong uninitialized_var(msr_mask);
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

	if (allowed) {
		vcpu->arch.shared->srr0 = vcpu->arch.pc;
		vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			vcpu->arch.esr = vcpu->arch.queued_esr;
		if (update_dear == true)
			vcpu->arch.shared->dar = vcpu->arch.queued_dear;
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}
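
/*
 * A successful delivery mirrors what the hardware would do: SRR0/SRR1 get the
 * old PC and MSR, the PC is redirected to IVPR|IVOR[priority], and the MSR is
 * masked so that at most CE/ME/DE stay enabled for noncritical interrupts.
 */
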
/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	if (*pending)
		vcpu->arch.shared->int_pending = 1;
	else if (old_pending)
		vcpu->arch.shared->int_pending = 0;
}
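
/*
 * int_pending lives in the vcpu's shared area (the paravirtual "magic page"),
 * so a PV-aware guest can observe interrupt status without exiting.
 */
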
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & MSR_PR) {
			/* Program traps generated by user-level software must
			 * be handled by the guest kernel. */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;
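
	/*
	 * The unavailable/storage cases below simply reflect the exception
	 * back to the guest: queueing the matching BOOKE_IRQPRIO_* bit lets
	 * kvmppc_core_deliver_interrupts() vector the guest to its own
	 * handler before the next entry.
	 */
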
	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST_NV;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}
	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}
	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->msr = 0;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	return kvmppc_core_vcpu_setup(vcpu);
}
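
/*
 * The routines below back the KVM_GET_REGS/KVM_SET_REGS vcpu ioctls; they
 * copy between the in-kernel vcpu state and struct kvm_regs.
 */
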
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}
int __init kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
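
	/*
	 * The handlers were copied as data; flush the range so the new code is
	 * pushed out of the data cache and the instruction cache picks it up.
	 */
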
	flush_icache_range(kvmppc_booke_handlers,
			   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}
void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
}