Let's reuse the new common function for VCPU lookup by id.
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
[split out the new function into a separate patch]
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
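For reference, a minimal sketch of the common helper being reused here, assuming the kvm_for_each_vcpu-based definition that the split-out patch adds to include/linux/kvm_host.h (see that patch for the authoritative version):

/*
 * Walk the online VCPUs and return the one whose vcpu_id matches,
 * or NULL if no such VCPU exists. Callers that need kvm->lock held
 * (e.g. kvmppc_find_vcpu below) still take it around the call.
 */
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

With such a helper available, each caller below can drop its open-coded loop and iteration variables.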
 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 {
-	int r;
-	struct kvm_vcpu *v, *ret = NULL;
+	struct kvm_vcpu *ret;
 
 	mutex_lock(&kvm->lock);
-	kvm_for_each_vcpu(r, v, kvm) {
-		if (v->vcpu_id == id) {
-			ret = v;
-			break;
-		}
-	}
+	ret = kvm_get_vcpu_by_id(kvm, id);
 	mutex_unlock(&kvm->lock);
 	return ret;
 }
 static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tcpu;
 	int tid;
-	int i;
 
 	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
 	vcpu->stat.diagnose_9c++;
 	if (tid == vcpu->vcpu_id)
 		return 0;
 
-	kvm_for_each_vcpu(i, tcpu, kvm)
-		if (tcpu->vcpu_id == tid) {
-			kvm_vcpu_yield_to(tcpu);
-			break;
-		}
-
+	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
+	if (tcpu)
+		kvm_vcpu_yield_to(tcpu);
 	return 0;
 }
 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 {
 	int r;
-	struct kvm_vcpu *vcpu, *v;
+	struct kvm_vcpu *vcpu;
 
 	if (id >= KVM_MAX_VCPUS)
 		return -EINVAL;

[...]

 		r = -EINVAL;
 		goto unlock_vcpu_destroy;
 	}
-
-	kvm_for_each_vcpu(r, v, kvm)
-		if (v->vcpu_id == id) {
-			r = -EEXIST;
-			goto unlock_vcpu_destroy;
-		}
+	if (kvm_get_vcpu_by_id(kvm, id)) {
+		r = -EEXIST;
+		goto unlock_vcpu_destroy;
+	}
 
 	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);