kvm_vcpu_kick(vcpu);
}
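+/*
+ * Fill a caller-supplied bitmask of destination vcpus instead of
+ * returning a u32.  Physical unicast selects at most one vcpu
+ * (*mask is overwritten and the scan stops), while broadcast and
+ * logical mode OR in one bit per matching vcpu.  *mask is cleared
+ * on entry, so callers need not initialize it.
+ */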
-u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
- u8 dest_mode)
+void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
+ u8 dest_mode, unsigned long *mask)
{
- u32 mask = 0;
int i;
struct kvm *kvm = ioapic->kvm;
struct kvm_vcpu *vcpu;
ioapic_debug("dest %d dest_mode %d\n", dest, dest_mode);
+ *mask = 0;
if (dest_mode == 0) { /* Physical mode. */
if (dest == 0xFF) { /* Broadcast. */
for (i = 0; i < KVM_MAX_VCPUS; ++i)
if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
- mask |= 1 << i;
- return mask;
+ *mask |= 1 << i;
+ return;
}
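+		/* Physical unicast: match the destination APIC ID. */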
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
if (vcpu->arch.apic)
- mask = 1 << i;
+ *mask = 1 << i;
break;
}
}
	} else if (dest != 0)	/* Logical mode, MDA non-zero. */
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
if (vcpu->arch.apic &&
kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
- mask |= 1 << vcpu->vcpu_id;
+ *mask |= 1 << vcpu->vcpu_id;
}
- ioapic_debug("mask %x\n", mask);
- return mask;
+	ioapic_debug("mask %lx\n", *mask);
}
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
int kvm_ioapic_init(struct kvm *kvm);
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
-u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
- u8 dest_mode);
+void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
+ u8 dest_mode, unsigned long *mask);
#endif
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask)
{
struct kvm_vcpu *vcpu;
- *deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
- entry->fields.dest_id, entry->fields.dest_mode);
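+	/* The bitmask is now written directly into deliver_bitmask. */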
+ kvm_ioapic_get_delivery_bitmask(ioapic, entry->fields.dest_id,
+ entry->fields.dest_mode,
+ deliver_bitmask);
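+	/* Lowest priority: arbitration picks a single destination vcpu. */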
switch (entry->fields.delivery_mode) {
case IOAPIC_LOWEST_PRIORITY:
vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,