diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
@@ ... @@ lid_to_vcpu()
{
union ia64_lid lid;
int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
- if (kvm->vcpus[i]) {
- lid.val = VCPU_LID(kvm->vcpus[i]);
- if (lid.id == id && lid.eid == eid)
- return kvm->vcpus[i];
- }
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ lid.val = VCPU_LID(vcpu);
+ if (lid.id == id && lid.eid == eid)
+ return vcpu;
}
return NULL;
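
kvm_for_each_vcpu() and kvm_get_vcpu() are introduced in the include/linux/kvm_host.h hunk further down. Hand-expanding the macro for the lid_to_vcpu() loop above gives roughly the following; this is written out purely for illustration and is not part of the patch:

	for (i = 0, vcpu = kvm_get_vcpu(kvm, i);
	     i < atomic_read(&kvm->online_vcpus) && vcpu;
	     vcpu = kvm_get_vcpu(kvm, ++i)) {
		lid.val = VCPU_LID(vcpu);
		if (lid.id == id && lid.eid == eid)
			return vcpu;
	}

Note one behavioral change: the old loop skipped NULL holes in vcpus[], while the iterator stops at the first NULL slot. That is safe as long as vcpus[] is filled contiguously, which the creation path (kvm_vm_ioctl_create_vcpu, below) arranges.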

@@ ... @@ handle_global_purge()
struct kvm *kvm = vcpu->kvm;
struct call_data call_data;
int i;
+ struct kvm_vcpu *vcpui;
call_data.ptc_g_data = p->u.ptc_g_data;
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
- if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
- KVM_MP_STATE_UNINITIALIZED ||
- vcpu == kvm->vcpus[i])
+ kvm_for_each_vcpu(i, vcpui, kvm) {
+ if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
+ vcpu == vcpui)
continue;
- if (waitqueue_active(&kvm->vcpus[i]->wq))
- wake_up_interruptible(&kvm->vcpus[i]->wq);
+ if (waitqueue_active(&vcpui->wq))
+ wake_up_interruptible(&vcpui->wq);
- if (kvm->vcpus[i]->cpu != -1) {
- call_data.vcpu = kvm->vcpus[i];
- smp_call_function_single(kvm->vcpus[i]->cpu,
+ if (vcpui->cpu != -1) {
+ call_data.vcpu = vcpui;
+ smp_call_function_single(vcpui->cpu,
vcpu_global_purge, &call_data, 1);
} else
printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");

diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
@@ ... @@
static void kvmppc_free_vcpus(struct kvm *kvm)
{
unsigned int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_free(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arch_vcpu_free(vcpu);
+
+ mutex_lock(&kvm->lock);
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+
+ atomic_set(&kvm->online_vcpus, 0);
+ mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
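
The free-then-unpublish shape above repeats in the s390 and x86 hunks below: free every vCPU through the iterator, then clear the published slots and reset the count under kvm->lock. Condensed into a hypothetical helper for illustration (the patch itself keeps the sequence open-coded in each arch):

	/* Sketch of the shared teardown tail; not a function from the patch. */
	static void kvm_unpublish_vcpus(struct kvm *kvm)
	{
		int i;

		mutex_lock(&kvm->lock);
		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
			kvm->vcpus[i] = NULL;		/* clear the slots first... */
		atomic_set(&kvm->online_vcpus, 0);	/* ...then drop the count */
		mutex_unlock(&kvm->lock);
	}

Since the new iterator stops at the first NULL entry, clearing vcpus[0] also makes any stray kvm_for_each_vcpu() walk terminate immediately.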

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
@@ ... @@
static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_destroy(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arch_vcpu_destroy(vcpu);
+
+ mutex_lock(&kvm->lock);
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+
+ atomic_set(&kvm->online_vcpus, 0);
+ mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)

@@ ... @@ kvm_arch_vcpu_create()
BUG_ON(!kvm->arch.sca);
if (!kvm->arch.sca->cpu[id].sda)
kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
- else
- BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

@@ ... @@ kvm_arch_set_memory_region()
int user_alloc)
{
int i;
+ struct kvm_vcpu *vcpu;
/* A few sanity checks. We can have exactly one memory slot which has
to start at guest virtual zero and which has to be located at a
return -EINVAL;
/* request update of sie control block for all available vcpus */
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- if (test_and_set_bit(KVM_REQ_MMU_RELOAD,
- &kvm->vcpus[i]->requests))
- continue;
- kvm_s390_inject_sigp_stop(kvm->vcpus[i],
- ACTION_RELOADVCPU_ON_STOP);
- }
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+ continue;
+ kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
}
return 0;

diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
@@ ... @@ __inject_pit_timer_intr()
* VCPU0, and only if its LVT0 is in EXTINT mode.
*/
if (kvm->arch.vapics_in_nmi_mode > 0)
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (vcpu)
- kvm_apic_nmi_wd_deliver(vcpu);
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_apic_nmi_wd_deliver(vcpu);
}
void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
@@ ... @@
static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < KVM_MAX_VCPUS; ++i)
- if (kvm->vcpus[i])
- kvm->vcpus[i]->arch.last_pte_updated = NULL;
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ vcpu->arch.last_pte_updated = NULL;
}
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
@@ ... @@ kvmclock_cpufreq_notifier()
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (!vcpu)
- continue;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->cpu != freq->cpu)
continue;
if (!kvm_request_guest_time_update(vcpu))

@@ ... @@
static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
+ struct kvm_vcpu *vcpu;
/*
* Unpin any mmu pages first.
*/
- for (i = 0; i < KVM_MAX_VCPUS; ++i)
- if (kvm->vcpus[i])
- kvm_unload_vcpu_mmu(kvm->vcpus[i]);
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_free(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_unload_vcpu_mmu(vcpu);
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arch_vcpu_free(vcpu);
+
+ mutex_lock(&kvm->lock);
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+ atomic_set(&kvm->online_vcpus, 0);
+ mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
@@ ... @@
#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
+static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+{
+ smp_rmb();
+ return kvm->vcpus[i];
+}
+
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
+ idx < atomic_read(&kvm->online_vcpus) && vcpup; \
+ vcpup = kvm_get_vcpu(kvm, ++idx))
+
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
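
The smp_rmb() in kvm_get_vcpu() orders the load of the vcpus[] slot after the load of online_vcpus in the loop condition; it pairs with a write barrier on the creation side (see the note after the kvm_vm_ioctl_create_vcpu hunk below). A minimal usage sketch, assuming only the definitions above; kick_all_vcpus() is a made-up example, not a function from the patch:

	/* Hypothetical caller: wake every vCPU that has a sleeper queued. */
	static void kick_all_vcpus(struct kvm *kvm)
	{
		struct kvm_vcpu *vcpu;
		int i;

		kvm_for_each_vcpu(i, vcpu, kvm)
			if (waitqueue_active(&vcpu->wq))
				wake_up_interruptible(&vcpu->wq);
	}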

diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
@@ ... @@ kvm_irq_delivery_to_apic()
kvm_is_dm_lowest_prio(irq))
printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
- vcpu = kvm->vcpus[i];
-
- if (!vcpu || !kvm_apic_present(vcpu))
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!kvm_apic_present(vcpu))
continue;
if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
@@ ... @@ make_all_cpus_request()
me = get_cpu();
spin_lock(&kvm->requests_lock);
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (!vcpu)
- continue;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
if (test_and_set_bit(req, &vcpu->requests))
continue;
cpu = vcpu->cpu;

@@ ... @@
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
int r;
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu, *v;
vcpu = kvm_arch_vcpu_create(kvm, id);
if (IS_ERR(vcpu))
goto vcpu_destroy;
}
- for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
- if (kvm->vcpus[r]->vcpu_id == id) {
+ kvm_for_each_vcpu(r, v, kvm)
+ if (v->vcpu_id == id) {
r = -EEXIST;
goto vcpu_destroy;
}
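
Not shown in this excerpt: later in kvm_vm_ioctl_create_vcpu() the new vCPU is published by storing the pointer before raising the counter, with a write barrier in between so that a reader passing the online_vcpus check in kvm_for_each_vcpu() never sees a stale NULL slot. Roughly:

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();	/* make the slot visible before the count rises */
	atomic_inc(&kvm->online_vcpus);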

@@ ... @@ vcpu_stat_get()
*val = 0;
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (vcpu)
- *val += *(u32 *)((void *)vcpu + offset);
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ *val += *(u32 *)((void *)vcpu + offset);
+
spin_unlock(&kvm_lock);
return 0;
}
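
The pointer arithmetic in vcpu_stat_get() reads a u32 counter embedded in struct kvm_vcpu; the debugfs stat tables in the arch code typically generate the offset with offsetof(), along these lines (the field name here is chosen only for illustration):

	/* Illustrative only: producing and using such an offset. */
	size_t offset = offsetof(struct kvm_vcpu, stat.halt_exits);
	u32 value = *(u32 *)((void *)vcpu + offset);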