tmp2 = GxICR(irq);
irq_affinity_online[irq] =
- cpumask_any_and(d->affinity, cpu_online_mask);
+ cpumask_any_and(irq_data_get_affinity_mask(d),
+ cpu_online_mask);
CROSS_GxICR(irq, irq_affinity_online[irq]) =
(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
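/* The enable path below makes the same substitution: the affinity mask is
 * obtained via irq_data_get_affinity_mask(d) rather than by dereferencing
 * d->affinity directly. */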
} else {
tmp = GxICR(irq);
- irq_affinity_online[irq] = cpumask_any_and(d->affinity,
+ irq_affinity_online[irq] = cpumask_any_and(irq_data_get_affinity_mask(d),
cpu_online_mask);
CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
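Each hunk makes the same conversion: the affinity cpumask is read through the genirq accessor irq_data_get_affinity_mask() instead of through the irq_data field itself, so the code no longer depends on where the mask is stored. A minimal sketch of the access pattern follows; pick_online_target() is a hypothetical helper used only for illustration, not part of this patch, and it assumes the usual kernel headers.

#include <linux/cpumask.h>
#include <linux/irq.h>

/* Hypothetical helper, illustration only: pick any online CPU from an
 * IRQ's affinity mask, the way the hunks above and below do. */
static unsigned int pick_online_target(struct irq_data *d)
{
	/* Ask the genirq core for the mask instead of using d->affinity. */
	const struct cpumask *mask = irq_data_get_affinity_mask(d);

	return cpumask_any_and(mask, cpu_online_mask);
}

The CPU-migration hunk below keeps the returned pointer in a local mask variable because it also writes to the mask via cpumask_set_cpu().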
self = smp_processor_id();
for (irq = 0; irq < NR_IRQS; irq++) {
struct irq_data *data = irq_get_irq_data(irq);
+ struct cpumask *mask = irq_data_get_affinity_mask(data);
if (irqd_is_per_cpu(data))
continue;
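/* If the current CPU is still in this IRQ's affinity mask but none of the
 * CPUs recorded in irq_affinity[irq] remains online, add the first online
 * CPU to the mask before the IRQ is re-routed below. */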
- if (cpumask_test_cpu(self, data->affinity) &&
+ if (cpumask_test_cpu(self, mask) &&
!cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
int cpu_id;
cpu_id = cpumask_first(cpu_online_mask);
- cpumask_set_cpu(cpu_id, data->affinity);
+ cpumask_set_cpu(cpu_id, mask);
}
/* We need to operate irq_affinity_online atomically. */
arch_local_cli_save(flags);
GxICR(irq) = x & GxICR_LEVEL;
tmp = GxICR(irq);
- new = cpumask_any_and(data->affinity,
- cpu_online_mask);
+ new = cpumask_any_and(mask, cpu_online_mask);
irq_affinity_online[irq] = new;
CROSS_GxICR(irq, new) =