/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>
/* Interrupt Controller Registers Map */

/* Per-CPU mask/unmask of a single interrupt (relative to per_cpu_int_base) */
#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)

/* Global registers (relative to main_int_base) */
#define ARMADA_370_XP_INT_CONTROL (0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
/* Per-interrupt CPU routing register, one word per hwirq */
#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)

/* Read to acknowledge: low 10 bits hold the pending interrupt number */
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)

/* Software-triggered (doorbell/IPI) interrupt registers */
#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)

/* Timer0 is the per-CPU hwirq handled specially by mask/unmask */
#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5)

/* Doorbell numbers used as IPIs, and the matching 8-bit enable mask */
#define IPI_DOORBELL_START (0)
#define IPI_DOORBELL_END (8)
#define IPI_DOORBELL_MASK 0xFF
55 static DEFINE_RAW_SPINLOCK(irq_controller_lock
);
57 static void __iomem
*per_cpu_int_base
;
58 static void __iomem
*main_int_base
;
59 static struct irq_domain
*armada_370_xp_mpic_domain
;
63 * For shared global interrupts, mask/unmask global enable bit
64 * For CPU interrtups, mask/unmask the calling CPU's bit
66 static void armada_370_xp_irq_mask(struct irq_data
*d
)
69 irq_hw_number_t hwirq
= irqd_to_hwirq(d
);
71 if (hwirq
!= ARMADA_370_XP_TIMER0_PER_CPU_IRQ
)
72 writel(hwirq
, main_int_base
+
73 ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS
);
75 writel(hwirq
, per_cpu_int_base
+
76 ARMADA_370_XP_INT_SET_MASK_OFFS
);
78 writel(irqd_to_hwirq(d
),
79 per_cpu_int_base
+ ARMADA_370_XP_INT_SET_MASK_OFFS
);
83 static void armada_370_xp_irq_unmask(struct irq_data
*d
)
86 irq_hw_number_t hwirq
= irqd_to_hwirq(d
);
88 if (hwirq
!= ARMADA_370_XP_TIMER0_PER_CPU_IRQ
)
89 writel(hwirq
, main_int_base
+
90 ARMADA_370_XP_INT_SET_ENABLE_OFFS
);
92 writel(hwirq
, per_cpu_int_base
+
93 ARMADA_370_XP_INT_CLEAR_MASK_OFFS
);
95 writel(irqd_to_hwirq(d
),
96 per_cpu_int_base
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS
);
101 static int armada_xp_set_affinity(struct irq_data
*d
,
102 const struct cpumask
*mask_val
, bool force
)
105 unsigned long new_mask
= 0;
106 unsigned long online_mask
= 0;
107 unsigned long count
= 0;
108 irq_hw_number_t hwirq
= irqd_to_hwirq(d
);
111 for_each_cpu(cpu
, mask_val
) {
112 new_mask
|= 1 << cpu_logical_map(cpu
);
117 * Forbid mutlicore interrupt affinity
118 * This is required since the MPIC HW doesn't limit
119 * several CPUs from acknowledging the same interrupt.
124 for_each_cpu(cpu
, cpu_online_mask
)
125 online_mask
|= 1 << cpu_logical_map(cpu
);
127 raw_spin_lock(&irq_controller_lock
);
129 reg
= readl(main_int_base
+ ARMADA_370_XP_INT_SOURCE_CTL(hwirq
));
130 reg
= (reg
& (~online_mask
)) | new_mask
;
131 writel(reg
, main_int_base
+ ARMADA_370_XP_INT_SOURCE_CTL(hwirq
));
133 raw_spin_unlock(&irq_controller_lock
);
139 static struct irq_chip armada_370_xp_irq_chip
= {
140 .name
= "armada_370_xp_irq",
141 .irq_mask
= armada_370_xp_irq_mask
,
142 .irq_mask_ack
= armada_370_xp_irq_mask
,
143 .irq_unmask
= armada_370_xp_irq_unmask
,
145 .irq_set_affinity
= armada_xp_set_affinity
,
149 static int armada_370_xp_mpic_irq_map(struct irq_domain
*h
,
150 unsigned int virq
, irq_hw_number_t hw
)
152 armada_370_xp_irq_mask(irq_get_irq_data(virq
));
153 writel(hw
, main_int_base
+ ARMADA_370_XP_INT_SET_ENABLE_OFFS
);
154 irq_set_status_flags(virq
, IRQ_LEVEL
);
156 if (hw
== ARMADA_370_XP_TIMER0_PER_CPU_IRQ
) {
157 irq_set_percpu_devid(virq
);
158 irq_set_chip_and_handler(virq
, &armada_370_xp_irq_chip
,
159 handle_percpu_devid_irq
);
162 irq_set_chip_and_handler(virq
, &armada_370_xp_irq_chip
,
165 set_irq_flags(virq
, IRQF_VALID
| IRQF_PROBE
);
171 void armada_mpic_send_doorbell(const struct cpumask
*mask
, unsigned int irq
)
174 unsigned long map
= 0;
176 /* Convert our logical CPU mask into a physical one. */
177 for_each_cpu(cpu
, mask
)
178 map
|= 1 << cpu_logical_map(cpu
);
181 * Ensure that stores to Normal memory are visible to the
182 * other CPUs before issuing the IPI.
187 writel((map
<< 8) | irq
, main_int_base
+
188 ARMADA_370_XP_SW_TRIG_INT_OFFS
);
191 void armada_xp_mpic_smp_cpu_init(void)
193 /* Clear pending IPIs */
194 writel(0, per_cpu_int_base
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS
);
196 /* Enable first 8 IPIs */
197 writel(IPI_DOORBELL_MASK
, per_cpu_int_base
+
198 ARMADA_370_XP_IN_DRBEL_MSK_OFFS
);
200 /* Unmask IPI interrupt */
201 writel(0, per_cpu_int_base
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS
);
203 #endif /* CONFIG_SMP */
205 static struct irq_domain_ops armada_370_xp_mpic_irq_ops
= {
206 .map
= armada_370_xp_mpic_irq_map
,
207 .xlate
= irq_domain_xlate_onecell
,
210 static asmlinkage
void __exception_irq_entry
211 armada_370_xp_handle_irq(struct pt_regs
*regs
)
216 irqstat
= readl_relaxed(per_cpu_int_base
+
217 ARMADA_370_XP_CPU_INTACK_OFFS
);
218 irqnr
= irqstat
& 0x3FF;
224 irqnr
= irq_find_mapping(armada_370_xp_mpic_domain
,
226 handle_IRQ(irqnr
, regs
);
234 ipimask
= readl_relaxed(per_cpu_int_base
+
235 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS
)
238 writel(~IPI_DOORBELL_MASK
, per_cpu_int_base
+
239 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS
);
241 /* Handle all pending doorbells */
242 for (ipinr
= IPI_DOORBELL_START
;
243 ipinr
< IPI_DOORBELL_END
; ipinr
++) {
244 if (ipimask
& (0x1 << ipinr
))
245 handle_IPI(ipinr
, regs
);
254 static int __init
armada_370_xp_mpic_of_init(struct device_node
*node
,
255 struct device_node
*parent
)
259 main_int_base
= of_iomap(node
, 0);
260 per_cpu_int_base
= of_iomap(node
, 1);
262 BUG_ON(!main_int_base
);
263 BUG_ON(!per_cpu_int_base
);
265 control
= readl(main_int_base
+ ARMADA_370_XP_INT_CONTROL
);
267 armada_370_xp_mpic_domain
=
268 irq_domain_add_linear(node
, (control
>> 2) & 0x3ff,
269 &armada_370_xp_mpic_irq_ops
, NULL
);
271 if (!armada_370_xp_mpic_domain
)
272 panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
274 irq_set_default_host(armada_370_xp_mpic_domain
);
277 armada_xp_mpic_smp_cpu_init();
280 * Set the default affinity from all CPUs to the boot cpu.
281 * This is required since the MPIC doesn't limit several CPUs
282 * from acknowledging the same interrupt.
284 cpumask_clear(irq_default_affinity
);
285 cpumask_set_cpu(smp_processor_id(), irq_default_affinity
);
289 set_handle_irq(armada_370_xp_handle_irq
);
294 IRQCHIP_DECLARE(armada_370_xp_mpic
, "marvell,mpic", armada_370_xp_mpic_of_init
);