Merge git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm / common / gic.c
1 /*
2 * linux/arch/arm/common/gic.c
3 *
4 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Interrupt architecture for the GIC:
11 *
12 * o There is one Interrupt Distributor, which receives interrupts
13 * from system devices and sends them to the Interrupt Controllers.
14 *
15 * o There is one CPU Interface per CPU, which sends interrupts sent
16 * by the Distributor, and interrupts generated locally, to the
17 * associated CPU. The base address of the CPU interface is usually
18 * aliased so that the same address points to different chips depending
19 * on the CPU it is accessed from.
20 *
21 * Note that IRQs 0-31 are special - they are local to each CPU.
22 * As such, the enable set/clear, pending set/clear and active bit
23 * registers are banked per-cpu for these sources.
24 */
25 #include <linux/init.h>
26 #include <linux/kernel.h>
27 #include <linux/list.h>
28 #include <linux/smp.h>
29 #include <linux/cpumask.h>
30 #include <linux/io.h>
31
32 #include <asm/irq.h>
33 #include <asm/mach/irq.h>
34 #include <asm/hardware/gic.h>
35
/* Serializes all accesses to the GIC distributor and CPU-interface registers. */
static DEFINE_SPINLOCK(irq_controller_lock);

/* Address of GIC 0 CPU interface */
void __iomem *gic_cpu_base_addr __read_mostly;

/*
 * Per-controller state: the offset between Linux irq numbers and the
 * hardware GIC interrupt IDs, plus the MMIO bases of the distributor
 * and the (per-cpu banked) CPU interface.
 */
struct gic_chip_data {
	unsigned int irq_offset;
	void __iomem *dist_base;
	void __iomem *cpu_base;
};

/* Number of GIC instances; may be overridden by the platform before this point. */
#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

/* State for each GIC in the system; index 0 is the primary controller. */
static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
52
53 static inline void __iomem *gic_dist_base(struct irq_data *d)
54 {
55 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
56 return gic_data->dist_base;
57 }
58
59 static inline void __iomem *gic_cpu_base(struct irq_data *d)
60 {
61 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
62 return gic_data->cpu_base;
63 }
64
65 static inline unsigned int gic_irq(struct irq_data *d)
66 {
67 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
68 return d->irq - gic_data->irq_offset;
69 }
70
71 /*
72 * Routines to acknowledge, disable and enable interrupts
73 */
/*
 * Acknowledge an interrupt: write its hardware ID to the CPU
 * interface's End Of Interrupt register.
 */
static void gic_ack_irq(struct irq_data *d)
{
	spin_lock(&irq_controller_lock);
	writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
	spin_unlock(&irq_controller_lock);
}
80
81 static void gic_mask_irq(struct irq_data *d)
82 {
83 u32 mask = 1 << (d->irq % 32);
84
85 spin_lock(&irq_controller_lock);
86 writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
87 spin_unlock(&irq_controller_lock);
88 }
89
90 static void gic_unmask_irq(struct irq_data *d)
91 {
92 u32 mask = 1 << (d->irq % 32);
93
94 spin_lock(&irq_controller_lock);
95 writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
96 spin_unlock(&irq_controller_lock);
97 }
98
/*
 * Set the trigger type of a distributor interrupt.
 *
 * Only IRQ_TYPE_LEVEL_HIGH and IRQ_TYPE_EDGE_RISING are supported, and
 * SGIs (hardware IDs 0-15) have fixed configuration; anything else
 * returns -EINVAL.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	/* 2 config bits per interrupt, 16 interrupts per config register;
	 * bit 1 of the field selects edge (1) vs level (0). */
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	spin_lock(&irq_controller_lock);

	val = readl(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel(val, base + GIC_DIST_CONFIG + confoff);

	/* Re-enable only if we disabled it above. */
	if (enabled)
		writel(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	spin_unlock(&irq_controller_lock);

	return 0;
}
143
#ifdef CONFIG_SMP
/*
 * Route an interrupt to the first CPU in @mask_val by rewriting its
 * byte in the distributor's targets register.  Returns 0 on success,
 * -EINVAL if the irq has no descriptor.
 *
 * Both the register offset and the byte shift are derived from the
 * hardware interrupt ID.  (The original computed the shift from the
 * Linux irq number, which only matched because irq_offset is
 * 32-aligned.)
 */
static int
gic_set_cpu(struct irq_data *d, const struct cpumask *mask_val, bool force)
{
	unsigned int gicirq = gic_irq(d);
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gicirq & ~3);
	unsigned int shift = (gicirq % 4) * 8;
	unsigned int cpu = cpumask_first(mask_val);
	u32 val;
	struct irq_desc *desc;

	spin_lock(&irq_controller_lock);
	desc = irq_to_desc(d->irq);
	if (desc == NULL) {
		spin_unlock(&irq_controller_lock);
		return -EINVAL;
	}
	d->node = cpu;
	/* Replace the whole 8-bit target field for this interrupt. */
	val = readl(reg) & ~(0xff << shift);
	val |= 1 << (cpu + shift);
	writel(val, reg);
	spin_unlock(&irq_controller_lock);

	return 0;
}
#endif
169
/*
 * Chained handler for a GIC cascaded behind another interrupt
 * controller: ack the parent @irq, read the cascaded GIC's INTACK
 * register to find the pending hardware ID, and dispatch the
 * corresponding Linux irq.  The parent is unmasked again on exit.
 */
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = get_irq_data(irq);
	struct irq_chip *chip = get_irq_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	/* primary controller ack'ing */
	chip->irq_ack(&desc->irq_data);

	spin_lock(&irq_controller_lock);
	status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
	spin_unlock(&irq_controller_lock);

	/* Low 10 bits of INTACK carry the interrupt ID. */
	gic_irq = (status & 0x3ff);
	/* 1023 is the spurious-interrupt ID: nothing pending. */
	if (gic_irq == 1023)
		goto out;

	cascade_irq = gic_irq + chip_data->irq_offset;
	/* SGIs/PPIs (<32) and out-of-range IDs are not valid cascaded sources. */
	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	/* primary controller unmasking */
	chip->irq_unmask(&desc->irq_data);
}
198
/*
 * irq_chip callbacks for interrupts owned directly by the GIC.
 * Affinity control is only provided on SMP builds.
 */
static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_cpu,
#endif
};
209
210 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
211 {
212 if (gic_nr >= MAX_GIC_NR)
213 BUG();
214 if (set_irq_data(irq, &gic_data[gic_nr]) != 0)
215 BUG();
216 set_irq_chained_handler(irq, gic_handle_cascade_irq);
217 }
218
/*
 * One-time distributor initialisation: probe the number of supported
 * interrupt lines, configure every global (>=32) interrupt as
 * level-triggered, targeted at the boot CPU, at priority 0xa0 and
 * disabled, then register irqs [irq_start, irq_limit) with genirq and
 * enable the distributor.
 */
static void __init gic_dist_init(struct gic_chip_data *gic,
	unsigned int irq_start)
{
	unsigned int gic_irqs, irq_limit, i;
	void __iomem *base = gic->dist_base;
	u32 cpumask = 1 << smp_processor_id();

	/* Replicate this CPU's target bit into all four target bytes. */
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* Keep the distributor disabled while reconfiguring it. */
	writel(0, base + GIC_DIST_CTRL);

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl(base + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;

	/*
	 * Set all global interrupts to be level triggered, active low.
	 * (16 interrupts per 32-bit config register, hence i * 4 / 16.)
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 * (4 one-byte target fields per 32-bit register.)
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 * (4 one-byte priority fields per 32-bit register.)
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/*
	 * Limit number of interrupts registered to the platform maximum
	 */
	irq_limit = gic->irq_offset + gic_irqs;
	if (WARN_ON(irq_limit > NR_IRQS))
		irq_limit = NR_IRQS;

	/*
	 * Setup the Linux IRQ subsystem.
	 */
	for (i = irq_start; i < irq_limit; i++) {
		set_irq_chip(i, &gic_chip);
		set_irq_chip_data(i, gic);
		set_irq_handler(i, handle_level_irq);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	/* All configured; turn the distributor back on. */
	writel(1, base + GIC_DIST_CTRL);
}
284
/*
 * Per-CPU interface initialisation, also run on secondary CPUs via
 * gic_secondary_init(): program the banked PPI/SGI enables and
 * priorities, then enable delivery through this CPU's interface.
 */
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic->dist_base;
	void __iomem *base = gic->cpu_base;
	int i;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 * (matches the 0xa0 used for global interrupts in gic_dist_init())
	 */
	for (i = 0; i < 32; i += 4)
		writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	/* Priority mask 0xf0 passes the 0xa0 priorities set above. */
	writel(0xf0, base + GIC_CPU_PRIMASK);
	writel(1, base + GIC_CPU_CTRL);
}
307
/*
 * Boot-time initialisation of GIC @gic_nr: record the MMIO bases and
 * the Linux-irq <-> hardware-ID offset, then bring up the distributor
 * and the boot CPU's interface.  GIC 0's CPU interface address is also
 * exported via gic_cpu_base_addr.
 */
void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
	void __iomem *dist_base, void __iomem *cpu_base)
{
	struct gic_chip_data *gic;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
	gic->dist_base = dist_base;
	gic->cpu_base = cpu_base;
	/*
	 * 32-align the offset so hardware-ID <-> Linux-irq conversion is
	 * a fixed offset for this controller (relied on by gic_irq()).
	 */
	gic->irq_offset = (irq_start - 1) & ~31;

	if (gic_nr == 0)
		gic_cpu_base_addr = cpu_base;

	gic_dist_init(gic, irq_start);
	gic_cpu_init(gic);
}
326
327 void __cpuinit gic_secondary_init(unsigned int gic_nr)
328 {
329 BUG_ON(gic_nr >= MAX_GIC_NR);
330
331 gic_cpu_init(&gic_data[gic_nr]);
332 }
333
/*
 * Enable a banked per-cpu interrupt (PPI) on the calling CPU and mark
 * it IRQ_NOPROBE.  Local interrupts are disabled across the update
 * since the enable registers are banked per-cpu.
 */
void __cpuinit gic_enable_ppi(unsigned int irq)
{
	unsigned long flags;

	local_irq_save(flags);
	irq_to_desc(irq)->status |= IRQ_NOPROBE;
	gic_unmask_irq(irq_get_irq_data(irq));
	local_irq_restore(flags);
}
343
#ifdef CONFIG_SMP
/*
 * Send software-generated interrupt @irq (an SGI, 0-15) to the CPUs in
 * @mask via the distributor's SOFTINT register; the target CPU mask
 * occupies bits 16-23 of the written value.
 *
 * NOTE(review): no memory barrier precedes the SOFTINT write, so prior
 * stores may not be visible to the target CPUs when the IPI arrives -
 * confirm whether a dsb() is required here for this kernel/platform.
 */
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	unsigned long map = *cpus_addr(*mask);

	/* this always happens on GIC0 */
	writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif