/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
20 #include <linux/jiffies.h>
21 #include <linux/timer.h>
22 #include <linux/types.h>
23 #include <linux/string.h>
24 #include <linux/kvm_host.h>
27 #include <asm/dcr-regs.h>
29 #include <asm/byteorder.h>
30 #include <asm/kvm_ppc.h>
31 #include <asm/disassemble.h>
33 static void kvmppc_emulate_dec(struct kvm_vcpu
*vcpu
)
35 if (vcpu
->arch
.tcr
& TCR_DIE
) {
36 /* The decrementer ticks at the same rate as the timebase, so
37 * that's how we convert the guest DEC value to the number of
39 unsigned long nr_jiffies
;
41 nr_jiffies
= vcpu
->arch
.dec
/ tb_ticks_per_jiffy
;
42 mod_timer(&vcpu
->arch
.dec_timer
,
43 get_jiffies_64() + nr_jiffies
);
45 del_timer(&vcpu
->arch
.dec_timer
);
49 static void kvmppc_emul_rfi(struct kvm_vcpu
*vcpu
)
51 vcpu
->arch
.pc
= vcpu
->arch
.srr0
;
52 kvmppc_set_msr(vcpu
, vcpu
->arch
.srr1
);
/* XXX is_bigendian should depend on MMU mapping or MSR[LE] */
69 int kvmppc_emulate_instruction(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
)
71 u32 inst
= vcpu
->arch
.last_inst
;
81 enum emulation_result emulated
= EMULATE_DONE
;
84 switch (get_op(inst
)) {
87 kvmppc_core_queue_program(vcpu
);
92 switch (get_xop(inst
)) {
94 kvmppc_emul_rfi(vcpu
);
99 emulated
= EMULATE_FAIL
;
105 switch (get_xop(inst
)) {
109 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 4, 1);
114 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.msr
;
119 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 1, 1);
122 case 131: /* wrtee */
124 vcpu
->arch
.msr
= (vcpu
->arch
.msr
& ~MSR_EE
)
125 | (vcpu
->arch
.gpr
[rs
] & MSR_EE
);
128 case 146: /* mtmsr */
130 kvmppc_set_msr(vcpu
, vcpu
->arch
.gpr
[rs
]);
135 emulated
= kvmppc_handle_store(run
, vcpu
,
140 case 163: /* wrteei */
141 vcpu
->arch
.msr
= (vcpu
->arch
.msr
& ~MSR_EE
)
147 emulated
= kvmppc_handle_store(run
, vcpu
,
152 case 247: /* stbux */
157 ea
= vcpu
->arch
.gpr
[rb
];
159 ea
+= vcpu
->arch
.gpr
[ra
];
161 emulated
= kvmppc_handle_store(run
, vcpu
,
164 vcpu
->arch
.gpr
[rs
] = ea
;
169 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 2, 1);
172 case 311: /* lhzux */
177 ea
= vcpu
->arch
.gpr
[rb
];
179 ea
+= vcpu
->arch
.gpr
[ra
];
181 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 2, 1);
182 vcpu
->arch
.gpr
[ra
] = ea
;
185 case 323: /* mfdcr */
186 dcrn
= get_dcrn(inst
);
189 /* The guest may access CPR0 registers to determine the timebase
190 * frequency, and it must know the real host frequency because it
191 * can directly access the timebase registers.
193 * It would be possible to emulate those accesses in userspace,
194 * but userspace can really only figure out the end frequency.
195 * We could decompose that into the factors that compute it, but
196 * that's tricky math, and it's easier to just report the real
200 case DCRN_CPR0_CONFIG_ADDR
:
201 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.cpr0_cfgaddr
;
203 case DCRN_CPR0_CONFIG_DATA
:
205 mtdcr(DCRN_CPR0_CONFIG_ADDR
,
206 vcpu
->arch
.cpr0_cfgaddr
);
207 vcpu
->arch
.gpr
[rt
] = mfdcr(DCRN_CPR0_CONFIG_DATA
);
211 run
->dcr
.dcrn
= dcrn
;
213 run
->dcr
.is_write
= 0;
214 vcpu
->arch
.io_gpr
= rt
;
215 vcpu
->arch
.dcr_needed
= 1;
216 emulated
= EMULATE_DO_DCR
;
221 case 339: /* mfspr */
222 sprn
= get_sprn(inst
);
227 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.srr0
; break;
229 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.srr1
; break;
231 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.mmucr
; break;
233 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.pid
; break;
235 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivpr
; break;
237 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ccr0
; break;
239 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ccr1
; break;
241 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.pvr
; break;
243 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.dear
; break;
245 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.esr
; break;
247 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.dbcr0
; break;
249 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.dbcr1
; break;
251 /* Note: mftb and TBRL/TBWL are user-accessible, so
252 * the guest can always access the real TB anyways.
253 * In fact, we probably will never see these traps. */
255 vcpu
->arch
.gpr
[rt
] = mftbl(); break;
257 vcpu
->arch
.gpr
[rt
] = mftbu(); break;
260 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.sprg0
; break;
262 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.sprg1
; break;
264 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.sprg2
; break;
266 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.sprg3
; break;
267 /* Note: SPRG4-7 are user-readable, so we don't get
271 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[0]; break;
273 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[1]; break;
275 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[2]; break;
277 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[3]; break;
279 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[4]; break;
281 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[5]; break;
283 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[6]; break;
285 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[7]; break;
287 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[8]; break;
289 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[9]; break;
291 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[10]; break;
293 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[11]; break;
295 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[12]; break;
297 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[13]; break;
299 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[14]; break;
301 vcpu
->arch
.gpr
[rt
] = vcpu
->arch
.ivor
[15]; break;
304 printk("mfspr: unknown spr %x\n", sprn
);
305 vcpu
->arch
.gpr
[rt
] = 0;
315 emulated
= kvmppc_handle_store(run
, vcpu
,
320 case 439: /* sthux */
325 ea
= vcpu
->arch
.gpr
[rb
];
327 ea
+= vcpu
->arch
.gpr
[ra
];
329 emulated
= kvmppc_handle_store(run
, vcpu
,
332 vcpu
->arch
.gpr
[ra
] = ea
;
335 case 451: /* mtdcr */
336 dcrn
= get_dcrn(inst
);
339 /* emulate some access in kernel */
341 case DCRN_CPR0_CONFIG_ADDR
:
342 vcpu
->arch
.cpr0_cfgaddr
= vcpu
->arch
.gpr
[rs
];
345 run
->dcr
.dcrn
= dcrn
;
346 run
->dcr
.data
= vcpu
->arch
.gpr
[rs
];
347 run
->dcr
.is_write
= 1;
348 vcpu
->arch
.dcr_needed
= 1;
349 emulated
= EMULATE_DO_DCR
;
354 case 467: /* mtspr */
355 sprn
= get_sprn(inst
);
359 vcpu
->arch
.srr0
= vcpu
->arch
.gpr
[rs
]; break;
361 vcpu
->arch
.srr1
= vcpu
->arch
.gpr
[rs
]; break;
363 vcpu
->arch
.mmucr
= vcpu
->arch
.gpr
[rs
]; break;
365 kvmppc_set_pid(vcpu
, vcpu
->arch
.gpr
[rs
]); break;
367 vcpu
->arch
.ccr0
= vcpu
->arch
.gpr
[rs
]; break;
369 vcpu
->arch
.ccr1
= vcpu
->arch
.gpr
[rs
]; break;
371 vcpu
->arch
.dear
= vcpu
->arch
.gpr
[rs
]; break;
373 vcpu
->arch
.esr
= vcpu
->arch
.gpr
[rs
]; break;
375 vcpu
->arch
.dbcr0
= vcpu
->arch
.gpr
[rs
]; break;
377 vcpu
->arch
.dbcr1
= vcpu
->arch
.gpr
[rs
]; break;
379 /* XXX We need to context-switch the timebase for
380 * watchdog and FIT. */
381 case SPRN_TBWL
: break;
382 case SPRN_TBWU
: break;
385 vcpu
->arch
.dec
= vcpu
->arch
.gpr
[rs
];
386 kvmppc_emulate_dec(vcpu
);
390 vcpu
->arch
.tsr
&= ~vcpu
->arch
.gpr
[rs
]; break;
393 vcpu
->arch
.tcr
= vcpu
->arch
.gpr
[rs
];
394 kvmppc_emulate_dec(vcpu
);
398 vcpu
->arch
.sprg0
= vcpu
->arch
.gpr
[rs
]; break;
400 vcpu
->arch
.sprg1
= vcpu
->arch
.gpr
[rs
]; break;
402 vcpu
->arch
.sprg2
= vcpu
->arch
.gpr
[rs
]; break;
404 vcpu
->arch
.sprg3
= vcpu
->arch
.gpr
[rs
]; break;
406 /* Note: SPRG4-7 are user-readable. These values are
407 * loaded into the real SPRGs when resuming the
410 vcpu
->arch
.sprg4
= vcpu
->arch
.gpr
[rs
]; break;
412 vcpu
->arch
.sprg5
= vcpu
->arch
.gpr
[rs
]; break;
414 vcpu
->arch
.sprg6
= vcpu
->arch
.gpr
[rs
]; break;
416 vcpu
->arch
.sprg7
= vcpu
->arch
.gpr
[rs
]; break;
419 vcpu
->arch
.ivpr
= vcpu
->arch
.gpr
[rs
]; break;
421 vcpu
->arch
.ivor
[0] = vcpu
->arch
.gpr
[rs
]; break;
423 vcpu
->arch
.ivor
[1] = vcpu
->arch
.gpr
[rs
]; break;
425 vcpu
->arch
.ivor
[2] = vcpu
->arch
.gpr
[rs
]; break;
427 vcpu
->arch
.ivor
[3] = vcpu
->arch
.gpr
[rs
]; break;
429 vcpu
->arch
.ivor
[4] = vcpu
->arch
.gpr
[rs
]; break;
431 vcpu
->arch
.ivor
[5] = vcpu
->arch
.gpr
[rs
]; break;
433 vcpu
->arch
.ivor
[6] = vcpu
->arch
.gpr
[rs
]; break;
435 vcpu
->arch
.ivor
[7] = vcpu
->arch
.gpr
[rs
]; break;
437 vcpu
->arch
.ivor
[8] = vcpu
->arch
.gpr
[rs
]; break;
439 vcpu
->arch
.ivor
[9] = vcpu
->arch
.gpr
[rs
]; break;
441 vcpu
->arch
.ivor
[10] = vcpu
->arch
.gpr
[rs
]; break;
443 vcpu
->arch
.ivor
[11] = vcpu
->arch
.gpr
[rs
]; break;
445 vcpu
->arch
.ivor
[12] = vcpu
->arch
.gpr
[rs
]; break;
447 vcpu
->arch
.ivor
[13] = vcpu
->arch
.gpr
[rs
]; break;
449 vcpu
->arch
.ivor
[14] = vcpu
->arch
.gpr
[rs
]; break;
451 vcpu
->arch
.ivor
[15] = vcpu
->arch
.gpr
[rs
]; break;
454 printk("mtspr: unknown spr %x\n", sprn
);
455 emulated
= EMULATE_FAIL
;
461 /* Do nothing. The guest is performing dcbi because
462 * hardware DMA is not snooped by the dcache, but
463 * emulated DMA either goes through the dcache as
464 * normal writes, or the host kernel has handled dcache
468 case 534: /* lwbrx */
470 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 4, 0);
473 case 566: /* tlbsync */
476 case 662: /* stwbrx */
481 emulated
= kvmppc_handle_store(run
, vcpu
,
486 case 978: /* tlbwe */
490 emulated
= kvmppc_emul_tlbwe(vcpu
, ra
, rs
, ws
);
493 case 914: /* tlbsx */
498 emulated
= kvmppc_emul_tlbsx(vcpu
, rt
, ra
, rb
, rc
);
501 case 790: /* lhbrx */
503 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 2, 0);
506 case 918: /* sthbrx */
511 emulated
= kvmppc_handle_store(run
, vcpu
,
516 case 966: /* iccci */
520 printk("unknown: op %d xop %d\n", get_op(inst
),
522 emulated
= EMULATE_FAIL
;
529 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 4, 1);
535 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 4, 1);
536 vcpu
->arch
.gpr
[ra
] = vcpu
->arch
.paddr_accessed
;
541 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 1, 1);
547 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 1, 1);
548 vcpu
->arch
.gpr
[ra
] = vcpu
->arch
.paddr_accessed
;
553 emulated
= kvmppc_handle_store(run
, vcpu
, vcpu
->arch
.gpr
[rs
],
560 emulated
= kvmppc_handle_store(run
, vcpu
, vcpu
->arch
.gpr
[rs
],
562 vcpu
->arch
.gpr
[ra
] = vcpu
->arch
.paddr_accessed
;
567 emulated
= kvmppc_handle_store(run
, vcpu
, vcpu
->arch
.gpr
[rs
],
574 emulated
= kvmppc_handle_store(run
, vcpu
, vcpu
->arch
.gpr
[rs
],
576 vcpu
->arch
.gpr
[ra
] = vcpu
->arch
.paddr_accessed
;
581 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 2, 1);
587 emulated
= kvmppc_handle_load(run
, vcpu
, rt
, 2, 1);
588 vcpu
->arch
.gpr
[ra
] = vcpu
->arch
.paddr_accessed
;
593 emulated
= kvmppc_handle_store(run
, vcpu
, vcpu
->arch
.gpr
[rs
],
600 emulated
= kvmppc_handle_store(run
, vcpu
, vcpu
->arch
.gpr
[rs
],
602 vcpu
->arch
.gpr
[ra
] = vcpu
->arch
.paddr_accessed
;
606 printk("unknown op %d\n", get_op(inst
));
607 emulated
= EMULATE_FAIL
;
611 KVMTRACE_3D(PPC_INSTR
, vcpu
, inst
, vcpu
->arch
.pc
, emulated
, entryexit
);
614 vcpu
->arch
.pc
+= 4; /* Advance past emulated instruction. */