ppc: Create disassemble.h to extract instruction fields
arch/powerpc/kvm/emulate.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>

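/* For reference: the get_op()/get_xop()/get_rt()/... helpers used below come
 * from the new asm/disassemble.h named in the commit subject. A minimal
 * sketch of what such a header provides, assuming the standard PowerPC
 * instruction word layout (the actual header is the authority):
 *
 *	static inline unsigned int get_op(u32 inst)  { return inst >> 26; }
 *	static inline unsigned int get_xop(u32 inst) { return (inst >> 1) & 0x3ff; }
 *	static inline unsigned int get_rt(u32 inst)  { return (inst >> 21) & 0x1f; }
 *	static inline unsigned int get_rs(u32 inst)  { return (inst >> 21) & 0x1f; }
 *	static inline unsigned int get_ra(u32 inst)  { return (inst >> 16) & 0x1f; }
 *	static inline unsigned int get_rb(u32 inst)  { return (inst >> 11) & 0x1f; }
 *	static inline unsigned int get_ws(u32 inst)  { return (inst >> 11) & 0x1f; }
 *	static inline unsigned int get_rc(u32 inst)  { return inst & 0x1; }
 *
 * SPR and DCR numbers are encoded with their two 5-bit halves swapped, so
 * the corresponding helpers recombine them, e.g.:
 *
 *	static inline unsigned int get_sprn(u32 inst)
 *	{
 *		return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
 *	}
 */
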
static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.tcr & TCR_DIE) {
		/* The decrementer ticks at the same rate as the timebase, so
		 * dividing the guest DEC value by tb_ticks_per_jiffy converts
		 * it to host jiffies. The division truncates: a DEC value
		 * smaller than tb_ticks_per_jiffy yields nr_jiffies == 0,
		 * so the timer fires on the next timer tick. */
		unsigned long nr_jiffies;

		nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
		mod_timer(&vcpu->arch.dec_timer,
		          get_jiffies_64() + nr_jiffies);
	} else {
		del_timer(&vcpu->arch.dec_timer);
	}
}

static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pc = vcpu->arch.srr0;
	kvmppc_set_msr(vcpu, vcpu->arch.srr1);
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = vcpu->arch.last_inst;
	u32 ea;
	int ra;
	int rb;
	int rc;
	int rs;
	int rt;
	int ws;
	int sprn;
	int dcrn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

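	/* Worked example of the decode below: the instruction word for
	 * "lwz r3, 0(r4)" is 0x80640000, so get_op() yields primary opcode
	 * 32, get_rt() yields 3, and get_ra() yields 4. */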
	switch (get_op(inst)) {
	case 3:                                             /* trap */
		printk("trap!\n");
		kvmppc_core_queue_program(vcpu);
		advance = 0;
		break;

	case 19:
		switch (get_xop(inst)) {
		case 50:                                    /* rfi */
			kvmppc_emul_rfi(vcpu);
			advance = 0;
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case 31:
		switch (get_xop(inst)) {

		case 23:                                    /* lwzx */
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case 83:                                    /* mfmsr */
			rt = get_rt(inst);
			vcpu->arch.gpr[rt] = vcpu->arch.msr;
			break;

		case 87:                                    /* lbzx */
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case 131:                                   /* wrtee */
			rs = get_rs(inst);
			vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
			                 | (vcpu->arch.gpr[rs] & MSR_EE);
			break;

		case 146:                                   /* mtmsr */
			rs = get_rs(inst);
			kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
			break;

		case 151:                                   /* stwx */
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               4, 1);
			break;

		case 163:                                   /* wrteei */
			/* The E bit of wrteei occupies the same bit position
			 * as MSR[EE], so it can be masked straight out of the
			 * instruction image. */
			vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
			                 | (inst & MSR_EE);
			break;

		case 215:                                   /* stbx */
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               1, 1);
			break;

		case 247:                                   /* stbux */
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               1, 1);
			/* The update form writes the EA back to ra, not rs. */
			vcpu->arch.gpr[ra] = ea;
			break;

		case 279:                                   /* lhzx */
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case 311:                                   /* lhzux */
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

		case 323:                                   /* mfdcr */
			dcrn = get_dcrn(inst);
			rt = get_rt(inst);

			/* The guest may access CPR0 registers to determine
			 * the timebase frequency, and it must know the real
			 * host frequency because it can directly access the
			 * timebase registers.
			 *
			 * It would be possible to emulate those accesses in
			 * userspace, but userspace can really only figure out
			 * the end frequency. We could decompose that into the
			 * factors that compute it, but that's tricky math, and
			 * it's easier to just report the real CPR0 values.
			 */
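			/* For illustration, the indirect CPR0 access pattern
			 * described above looks like this from the guest side
			 * (register choices here are hypothetical):
			 *
			 *	mtdcr	DCRN_CPR0_CONFIG_ADDR, r3  # select reg
			 *	mfdcr	r4, DCRN_CPR0_CONFIG_DATA  # read it
			 */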
			switch (dcrn) {
			case DCRN_CPR0_CONFIG_ADDR:
				vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
				break;
			case DCRN_CPR0_CONFIG_DATA:
				local_irq_disable();
				mtdcr(DCRN_CPR0_CONFIG_ADDR,
				      vcpu->arch.cpr0_cfgaddr);
				vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
				local_irq_enable();
				break;
			default:
				run->dcr.dcrn = dcrn;
				run->dcr.data = 0;
				run->dcr.is_write = 0;
				vcpu->arch.io_gpr = rt;
				vcpu->arch.dcr_needed = 1;
				emulated = EMULATE_DO_DCR;
			}

			break;

		case 339:                                   /* mfspr */
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
			case SPRN_SRR1:
				vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
			case SPRN_MMUCR:
				vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
			case SPRN_PID:
				vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
			case SPRN_IVPR:
				vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
			case SPRN_CCR0:
				vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
			case SPRN_CCR1:
				vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
			case SPRN_PVR:
				vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
			case SPRN_DEAR:
				vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
			case SPRN_ESR:
				vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
			case SPRN_DBCR0:
				vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
			case SPRN_DBCR1:
				vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				vcpu->arch.gpr[rt] = mftbl(); break;
			case SPRN_TBWU:
				vcpu->arch.gpr[rt] = mftbu(); break;

			case SPRN_SPRG0:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
			case SPRN_SPRG1:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
			case SPRN_SPRG2:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
			case SPRN_SPRG3:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_IVOR0:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
			case SPRN_IVOR1:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
			case SPRN_IVOR2:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
			case SPRN_IVOR3:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
			case SPRN_IVOR4:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
			case SPRN_IVOR5:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
			case SPRN_IVOR6:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
			case SPRN_IVOR7:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
			case SPRN_IVOR8:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
			case SPRN_IVOR9:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
			case SPRN_IVOR10:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
			case SPRN_IVOR11:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
			case SPRN_IVOR12:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
			case SPRN_IVOR13:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
			case SPRN_IVOR14:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
			case SPRN_IVOR15:
				vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;

			default:
				printk("mfspr: unknown spr %x\n", sprn);
				vcpu->arch.gpr[rt] = 0;
				break;
			}
			break;

		case 407:                                   /* sthx */
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               2, 1);
			break;

		case 439:                                   /* sthux */
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

		case 451:                                   /* mtdcr */
			dcrn = get_dcrn(inst);
			rs = get_rs(inst);

			/* emulate some accesses in kernel */
			switch (dcrn) {
			case DCRN_CPR0_CONFIG_ADDR:
				vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
				break;
			default:
				run->dcr.dcrn = dcrn;
				run->dcr.data = vcpu->arch.gpr[rs];
				run->dcr.is_write = 1;
				vcpu->arch.dcr_needed = 1;
				emulated = EMULATE_DO_DCR;
			}

			break;

		case 467:                                   /* mtspr */
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
			case SPRN_MMUCR:
				vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
			case SPRN_PID:
				kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
			case SPRN_CCR0:
				vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
			case SPRN_CCR1:
				vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
			case SPRN_DEAR:
				vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
			case SPRN_ESR:
				vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
			case SPRN_DBCR0:
				vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
			case SPRN_DBCR1:
				vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_DEC:
				vcpu->arch.dec = vcpu->arch.gpr[rs];
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_TSR:
				vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;

			case SPRN_TCR:
				vcpu->arch.tcr = vcpu->arch.gpr[rs];
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;

			/* Note: SPRG4-7 are user-readable. These values are
			 * loaded into the real SPRGs when resuming the
			 * guest. */
			case SPRN_SPRG4:
				vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG5:
				vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG6:
				vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG7:
				vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;

			case SPRN_IVPR:
				vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR0:
				vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR1:
				vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR2:
				vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR3:
				vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR4:
				vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR5:
				vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR6:
				vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR7:
				vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR8:
				vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR9:
				vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR10:
				vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR11:
				vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR12:
				vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR13:
				vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR14:
				vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
			case SPRN_IVOR15:
				vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;

			default:
				printk("mtspr: unknown spr %x\n", sprn);
				emulated = EMULATE_FAIL;
				break;
			}
			break;

		case 470:                                   /* dcbi */
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case 534:                                   /* lwbrx */
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case 566:                                   /* tlbsync */
			break;

		case 662:                                   /* stwbrx */
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               4, 0);
			break;

		case 978:                                   /* tlbwe */
			ra = get_ra(inst);
			rs = get_rs(inst);
			ws = get_ws(inst);
			emulated = kvmppc_emul_tlbwe(vcpu, ra, rs, ws);
			break;

		case 914:                                   /* tlbsx */
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);
			rc = get_rc(inst);
			emulated = kvmppc_emul_tlbsx(vcpu, rt, ra, rb, rc);
			break;

		case 790:                                   /* lhbrx */
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case 918:                                   /* sthbrx */
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               2, 0);
			break;

		case 966:                                   /* iccci */
			break;

		default:
			printk("unknown: op %d xop %d\n", get_op(inst),
			       get_xop(inst));
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case 32:                                            /* lwz */
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case 33:                                            /* lwzu */
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		/* XXX this and the update forms below write the guest physical
		 * address of the access back to ra, not the architectural EA;
		 * this works when the guest maps MMIO 1:1. */
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case 34:                                            /* lbz */
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case 35:                                            /* lbzu */
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case 36:                                            /* stw */
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               4, 1);
		break;

	case 37:                                            /* stwu */
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case 38:                                            /* stb */
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               1, 1);
		break;

	case 39:                                            /* stbu */
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case 40:                                            /* lhz */
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case 41:                                            /* lhzu */
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case 44:                                            /* sth */
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               2, 1);
		break;

	case 45:                                            /* sthu */
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	default:
		printk("unknown op %d\n", get_op(inst));
		emulated = EMULATE_FAIL;
		break;
	}

	KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit);

	if (advance)
		vcpu->arch.pc += 4; /* Advance past emulated instruction. */

	return emulated;
}