KVM: struct kvm_memory_slot.user_alloc -> bool
arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

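/*
 * Facility list page handed to guests via the SIE block's fac field;
 * kvm_s390_init() fills it from the host's STFLE list and masks out
 * everything KVM does not support.
 */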
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

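/*
 * Report optional capabilities to userspace: plain flags return 1,
 * KVM_CAP_NR_VCPUS/KVM_CAP_MAX_VCPUS return the vcpu limit, and
 * KVM_CAP_S390_COW is derived from an SCLP-reported facility.
 */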
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_S390_COW:
		r = sclp_get_fac85() & 0x2;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

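/*
 * Per-VM setup: validate the requested vm type, enable SIE for the
 * host process, allocate the system control area (SCA) and a debug
 * feature, and create the guest address space (gmap) unless this is a
 * user-controlled (ucontrol) VM.
 */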
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

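/*
 * kvm_arch_vcpu_load/put swap floating point and access registers
 * between host and guest and toggle the gmap and the RUNNING cpuflag.
 */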
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

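/*
 * Allocate the vcpu and its SIE control block, hook the vcpu into the
 * SCA (unless this is a ucontrol VM) and into the floating interrupt
 * structures, then let common KVM code finish the initialization.
 */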
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

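/*
 * ONE_REG accessors for SIE block fields that are not covered by the
 * register synchronization scheme: TOD programmable register, epoch
 * difference, CPU timer and clock comparator.
 */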
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

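/*
 * A single trip through SIE: reschedule and handle pending machine
 * checks if necessary, deliver pending interrupts, run the guest via
 * sie64a(), and turn a SIE fault into either a ucontrol exit or an
 * addressing exception injected into the guest.
 */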
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	kvm_guest_enter();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
	kvm_guest_exit();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}

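/*
 * The KVM_RUN loop: push registers dirtied by userspace into the SIE
 * block, run the guest until an intercept needs userspace attention,
 * then reflect the final state back into struct kvm_run.
 */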
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

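/*
 * Store-status helper: copy to guest storage either with prefixing
 * applied or to absolute storage, depending on the caller.
 */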
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   bool user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   bool user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

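/*
 * Module setup: register with common KVM code, then allocate the
 * facility list page and whitelist the facility bits KVM supports.
 * The page is allocated with GFP_DMA since the SIE block holds only a
 * 31-bit pointer to it (note the (int) cast in kvm_arch_vcpu_setup()).
 */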
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x001c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);