KVM: s390: Implement the directed yield (diag 9c) hypervisor call for KVM
author Konstantin Weitz <WEITZKON@de.ibm.com>
Wed, 25 Apr 2012 13:30:38 +0000 (15:30 +0200)
committer Marcelo Tosatti <mtosatti@redhat.com>
Tue, 1 May 2012 00:38:31 +0000 (21:38 -0300)
This patch implements the directed yield hypercall found on other
System z hypervisors. It delegates execution time to the virtual cpu
specified in the instruction's parameter.

Useful to avoid long spinlock waits in the guest.

Christian Borntraeger: moved common code into virt/kvm/

Signed-off-by: Konstantin Weitz <WEITZKON@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/s390/include/asm/kvm_host.h
arch/s390/kvm/diag.c
arch/s390/kvm/kvm-s390.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

index 7343872890a25a692eb7925f3a5904e640408880..dd17537b9a9d2362a749a0325d8ed6b86b348ed0 100644 (file)
@@ -148,6 +148,7 @@ struct kvm_vcpu_stat {
        u32 instruction_sigp_restart;
        u32 diagnose_10;
        u32 diagnose_44;
+       u32 diagnose_9c;
 };
 
 struct kvm_s390_io_info {
index a353f0ea45c2235d73455ab9013fbfeb1409223b..2d2ae327b747c2f3b4bb70957d2d8b2da7e50abd 100644 (file)
@@ -53,6 +53,29 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_vcpu *tcpu;
+       int tid;
+       int i;
+
+       tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
+       vcpu->stat.diagnose_9c++;
+       VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
+
+       if (tid == vcpu->vcpu_id)
+               return 0;
+
+       kvm_for_each_vcpu(i, tcpu, kvm)
+               if (tcpu->vcpu_id == tid) {
+                       kvm_vcpu_yield_to(tcpu);
+                       break;
+               }
+
+       return 0;
+}
+
 static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 {
        unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
@@ -89,6 +112,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
                return diag_release_pages(vcpu);
        case 0x44:
                return __diag_time_slice_end(vcpu);
+       case 0x9c:
+               return __diag_time_slice_end_directed(vcpu);
        case 0x308:
                return __diag_ipl_functions(vcpu);
        default:
index d30c8350b9493f748030caeb728c6d8b8537c6d5..fd98914a36f160a2230906c00234674a09f199ba 100644 (file)
@@ -74,6 +74,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
+       { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
 };
 
index 6f343307d72b05216aee152ca68f46ecd007132b..cae342d29d1bd7e08fd9ec3a4e625aa6986f1ccf 100644 (file)
@@ -461,6 +461,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
index 1847c762d8d949e320c25c678bcdd652a71b8145..7e140683ff14d503a9714058cadd9dde7e4ffaf9 100644 (file)
@@ -1543,6 +1543,31 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
+{
+       struct pid *pid;
+       struct task_struct *task = NULL;
+
+       rcu_read_lock();
+       pid = rcu_dereference(target->pid);
+       if (pid)
+               task = get_pid_task(pid, PIDTYPE_PID);
+       rcu_read_unlock();
+       if (!task)
+               return false;
+       if (task->flags & PF_VCPU) {
+               put_task_struct(task);
+               return false;
+       }
+       if (yield_to(task, 1)) {
+               put_task_struct(task);
+               return true;
+       }
+       put_task_struct(task);
+       return false;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
        struct kvm *kvm = me->kvm;
@@ -1561,8 +1586,6 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
         */
        for (pass = 0; pass < 2 && !yielded; pass++) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
-                       struct task_struct *task = NULL;
-                       struct pid *pid;
                        if (!pass && i < last_boosted_vcpu) {
                                i = last_boosted_vcpu;
                                continue;
@@ -1572,24 +1595,11 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
                                continue;
                        if (waitqueue_active(&vcpu->wq))
                                continue;
-                       rcu_read_lock();
-                       pid = rcu_dereference(vcpu->pid);
-                       if (pid)
-                               task = get_pid_task(vcpu->pid, PIDTYPE_PID);
-                       rcu_read_unlock();
-                       if (!task)
-                               continue;
-                       if (task->flags & PF_VCPU) {
-                               put_task_struct(task);
-                               continue;
-                       }
-                       if (yield_to(task, 1)) {
-                               put_task_struct(task);
+                       if (kvm_vcpu_yield_to(vcpu)) {
                                kvm->last_boosted_vcpu = i;
                                yielded = 1;
                                break;
                        }
-                       put_task_struct(task);
                }
        }
 }