irqchip: mips-gic: Clean up header file
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] / drivers / irqchip / irq-mips-gic.c
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8 */
9 #include <linux/bitmap.h>
10 #include <linux/init.h>
11 #include <linux/interrupt.h>
12 #include <linux/irqchip/mips-gic.h>
13 #include <linux/sched.h>
14 #include <linux/smp.h>
15 #include <linux/irq.h>
16 #include <linux/clocksource.h>
17
18 #include <asm/io.h>
19 #include <asm/setup.h>
20 #include <asm/traps.h>
21 #include <linux/hardirq.h>
22 #include <asm-generic/bitops/find.h>
23
/* GIC counter frequency (Hz); filled in by platform code. */
unsigned int gic_frequency;
/* Non-zero once a GIC has been detected; tested by platform code. */
unsigned int gic_present;

/* Per-CPU bitmap of the shared interrupts routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

/* Per-CPU scratch bitmap used to snapshot the shared pending registers. */
struct gic_pending_regs {
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
};

/* Per-CPU scratch bitmap used to snapshot the shared mask registers. */
struct gic_intrmask_regs {
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;		/* mapped GIC register space */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);	/* serialises GIC register updates */
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;		/* number of shared interrupts */
static int gic_vpes;			/* number of VPEs the GIC reports */
static unsigned int gic_cpu_pin;	/* CPU pin shared interrupts map to */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);
51
/* Read the 32-bit GIC register at byte offset @reg. */
static inline unsigned int gic_read(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}
56
/* Write @val to the 32-bit GIC register at byte offset @reg. */
static inline void gic_write(unsigned int reg, unsigned int val)
{
	__raw_writel(val, gic_base + reg);
}
61
/*
 * Read-modify-write of a GIC register: clear the bits in @mask, then OR
 * in @val.  Callers are responsible for any locking they need.
 */
static inline void gic_update_bits(unsigned int reg, unsigned int mask,
				   unsigned int val)
{
	unsigned int cur;

	cur = gic_read(reg);
	cur = (cur & ~mask) | val;
	gic_write(reg, cur);
}
72
/* Mask (disable) shared interrupt @intr via the reset-mask register. */
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}
78
/* Unmask (enable) shared interrupt @intr via the set-mask register. */
static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}
84
/* Set the polarity (GIC_POL_POS/GIC_POL_NEG) of shared interrupt @intr. */
static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			pol << GIC_INTR_BIT(intr));
}
91
/* Set the trigger mode (GIC_TRIG_EDGE/GIC_TRIG_LEVEL) of @intr. */
static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			trig << GIC_INTR_BIT(intr));
}
98
/* Enable/disable dual-edge (both edges) triggering for @intr. */
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1 << GIC_INTR_BIT(intr),
			dual << GIC_INTR_BIT(intr));
}
105
/* Route shared interrupt @intr to CPU interrupt pin @pin. */
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}
111
/* Route shared interrupt @intr to VPE @vpe. */
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}
118
119 #if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
/*
 * Read the 64-bit GIC counter via its two 32-bit halves.  The high half
 * is read before and after the low half; if it changed, the low word
 * wrapped mid-read and the sequence is retried.
 */
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	do {
		hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}
132
133 unsigned int gic_get_count_width(void)
134 {
135 unsigned int bits, config;
136
137 config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
138 bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
139 GIC_SH_CONFIG_COUNTBITS_SHF);
140
141 return bits;
142 }
143
/* Program this VPE's 64-bit compare value (timer match) as two halves. */
void gic_write_compare(cycle_t cnt)
{
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
		  (int)(cnt >> 32));
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
		  (int)(cnt & 0xffffffff));
}
151
/*
 * Program the compare value of another VPE (@cpu) through the local
 * "other" alias.  Interrupts are disabled so the OTHER_ADDR redirect
 * and the two compare writes are not interleaved on this VPE.
 */
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
		  (int)(cnt >> 32));
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
		  (int)(cnt & 0xffffffff));

	local_irq_restore(flags);
}
166
/* Read back this VPE's 64-bit compare value from its two halves. */
cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}
176 #endif
177
178 static bool gic_local_irq_is_routable(int intr)
179 {
180 u32 vpe_ctl;
181
182 /* All local interrupts are routable in EIC mode. */
183 if (cpu_has_veic)
184 return true;
185
186 vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
187 switch (intr) {
188 case GIC_LOCAL_INT_TIMER:
189 return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
190 case GIC_LOCAL_INT_PERFCTR:
191 return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
192 case GIC_LOCAL_INT_FDC:
193 return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
194 case GIC_LOCAL_INT_SWINT0:
195 case GIC_LOCAL_INT_SWINT1:
196 return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
197 default:
198 return true;
199 }
200 }
201
202 unsigned int gic_get_timer_pending(void)
203 {
204 unsigned int vpe_pending;
205
206 vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
207 return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
208 }
209
/*
 * EIC mode: bind interrupt vector @irq to shadow register set @set.
 * Installed as board_bind_eic_interrupt by gic_basic_init().
 */
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}
219
/*
 * Raise shared interrupt @intr by writing GIC_SH_WEDGE with bit 31 set
 * (set/assert; gic_ack_irq() writes it clear to retire the edge).
 */
void gic_send_ipi(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
}
224
/*
 * Linux IRQ number to use for the CP0 count/compare timer: the GIC
 * local timer interrupt when routable, else the raw CPU IRQ.
 */
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}
232
/*
 * Linux IRQ number for the CP0 performance counter, or -1 when the
 * counter shares the timer interrupt and has no IRQ of its own.
 */
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
244
/*
 * Find the lowest-numbered shared interrupt that is pending, unmasked
 * and routed to this CPU.  Returns gic_shared_intrs when none is
 * (find_first_bit() returns the bitmap size on an empty result).
 */
static unsigned int gic_get_int(void)
{
	unsigned int i;
	unsigned long *pending, *intrmask, *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;

	/* Get per-cpu bitmaps */
	pending = pending_regs[smp_processor_id()].pending;
	intrmask = intrmask_regs[smp_processor_id()].intrmask;
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	/* Snapshot the pending and mask registers, one 32-bit word each. */
	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += 0x4;
		intrmask_reg += 0x4;
	}

	/* Keep only interrupts that are unmasked and meant for this CPU. */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	return find_first_bit(pending, gic_shared_intrs);
}
271
/* irq_chip .irq_mask: disable a shared interrupt. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}
276
/* irq_chip .irq_unmask: enable a shared interrupt. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}
281
/*
 * irq_chip .irq_ack for edge interrupts: write the interrupt number to
 * GIC_SH_WEDGE with bit 31 clear to retire the latched edge.
 */
static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
}
288
/*
 * irq_chip .irq_set_type: program polarity, trigger and dual-edge mode
 * for a shared interrupt, then switch it between the edge and level
 * chip/handler pairs to match.  Unrecognised types fall back to
 * level/active-high.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	/* Edge interrupts need the acking edge chip; level ones do not. */
	if (is_edge) {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_edge_irq_controller,
						   handle_edge_irq, NULL);
	} else {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_level_irq_controller,
						   handle_level_irq, NULL);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
343
344 #ifdef CONFIG_SMP
345 static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
346 bool force)
347 {
348 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
349 cpumask_t tmp = CPU_MASK_NONE;
350 unsigned long flags;
351 int i;
352
353 cpumask_and(&tmp, cpumask, cpu_online_mask);
354 if (cpus_empty(tmp))
355 return -EINVAL;
356
357 /* Assumption : cpumask refers to a single CPU */
358 spin_lock_irqsave(&gic_lock, flags);
359
360 /* Re-route this IRQ */
361 gic_map_to_vpe(irq, first_cpu(tmp));
362
363 /* Update the pcpu_masks */
364 for (i = 0; i < NR_CPUS; i++)
365 clear_bit(irq, pcpu_masks[i].pcpu_mask);
366 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
367
368 cpumask_copy(d->affinity, cpumask);
369 spin_unlock_irqrestore(&gic_lock, flags);
370
371 return IRQ_SET_MASK_OK_NOCOPY;
372 }
373 #endif
374
/* irq_chip for level-triggered shared interrupts (no ack needed). */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};
384
/* irq_chip for edge-triggered shared interrupts (acks via GIC_SH_WEDGE). */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};
395
/*
 * Lowest-numbered local interrupt that is both pending and unmasked on
 * this VPE; returns GIC_NUM_LOCAL_INTRS when there is none.
 */
static unsigned int gic_get_local_int(void)
{
	unsigned long pending, masked;

	pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	return find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
}
407
/* irq_chip .irq_mask: disable a local interrupt on this VPE only. */
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}
414
/* irq_chip .irq_unmask: enable a local interrupt on this VPE only. */
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}
421
/* irq_chip for local interrupts managed per-VPE (percpu_devid IRQs). */
static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};
427
/*
 * Mask a local interrupt on every VPE, redirecting writes through the
 * VPE_OTHER alias under gic_lock so OTHER_ADDR updates are serialised.
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
441
/*
 * Unmask a local interrupt on every VPE, via the VPE_OTHER alias under
 * gic_lock (mirror of gic_mask_local_irq_all_vpes()).
 */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
455
/* irq_chip for local interrupts that must be (un)masked on all VPEs. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};
461
/*
 * Handle all pending GIC interrupts: local ones first, then shared.
 * Each gic_get_*() helper returns its bitmap-size sentinel when nothing
 * is pending, terminating the corresponding loop.
 */
static void __gic_irq_dispatch(void)
{
	unsigned int intr, virq;

	while ((intr = gic_get_local_int()) != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		do_IRQ(virq);
	}

	while ((intr = gic_get_int()) != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		do_IRQ(virq);
	}
}
478
/* Chained handler hooked on the CPU interrupt line in non-EIC mode. */
static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	__gic_irq_dispatch();
}
483
484 #ifdef CONFIG_MIPS_GIC_IPI
/* First shared interrupt of each per-CPU IPI range (one IRQ per CPU). */
static int gic_resched_int_base;
static int gic_call_int_base;

/* Map CPU number @cpu to its scheduler-resched IPI interrupt. */
unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}

/* Map CPU number @cpu to its call-function IPI interrupt. */
unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}
497
/* IRQ handler for the scheduler-resched IPI. */
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

/* IRQ handler for the smp_call_function IPI. */
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();

	return IRQ_HANDLED;
}
511
/* irqactions installed by gic_ipi_init_one() for the two IPI types. */
static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};
523
/*
 * Set up one IPI: map shared interrupt @intr into the IRQ domain, route
 * it exclusively to @cpu and install @action as its handler.
 */
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	gic_map_to_vpe(intr, cpu);
	/* Only @cpu may dispatch this interrupt. */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}
541
/* Reserve and wire up one call and one resched IPI per possible CPU. */
static __init void gic_ipi_init(void)
{
	int i;

	/* Use last 2 * nr_cpu_ids shared interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
555 #else
/* No-op stub when CONFIG_MIPS_GIC_IPI is disabled. */
static inline void gic_ipi_init(void)
{
}
559 #endif
560
/*
 * Bring the GIC to a known state: all shared interrupts level-triggered,
 * active-high and masked; all routable local interrupts masked on every
 * VPE (via the VPE_OTHER alias).  Also installs the EIC shadow-set hook.
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
585
/*
 * Domain .map for local interrupts: pick the right chip/handler pair
 * and program the per-VPE map register for @intr on every VPE.
 * Returns -EPERM when the source is not routable, -EINVAL for an
 * unknown local interrupt number.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
	} else {
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		/* Each local source has its own map register. */
		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
			break;
		case GIC_LOCAL_INT_TIMER:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
651
/*
 * Domain .map for shared interrupts: default to the level chip/handler
 * (gic_set_type() may switch it later), route the interrupt to the GIC
 * CPU pin and to VPE 0.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	/* Map to VPE 0 by default */
	gic_map_to_vpe(intr, 0);
	set_bit(intr, pcpu_masks[0].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
670
671 static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
672 irq_hw_number_t hw)
673 {
674 if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
675 return gic_local_irq_domain_map(d, virq, hw);
676 return gic_shared_irq_domain_map(d, virq, hw);
677 }
678
/* Domain ops; standard two-cell DT translation (hwirq, flags). */
static struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = irq_domain_xlate_twocell,
};
683
684 void __init gic_init(unsigned long gic_base_addr,
685 unsigned long gic_addrspace_size, unsigned int cpu_vec,
686 unsigned int irqbase)
687 {
688 unsigned int gicconfig;
689
690 gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);
691
692 gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
693 gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
694 GIC_SH_CONFIG_NUMINTRS_SHF;
695 gic_shared_intrs = ((gic_shared_intrs + 1) * 8);
696
697 gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
698 GIC_SH_CONFIG_NUMVPES_SHF;
699 gic_vpes = gic_vpes + 1;
700
701 if (cpu_has_veic) {
702 /* Always use vector 1 in EIC mode */
703 gic_cpu_pin = 0;
704 set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
705 __gic_irq_dispatch);
706 } else {
707 gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
708 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
709 gic_irq_dispatch);
710 }
711
712 gic_irq_domain = irq_domain_add_simple(NULL, GIC_NUM_LOCAL_INTRS +
713 gic_shared_intrs, irqbase,
714 &gic_irq_domain_ops, NULL);
715 if (!gic_irq_domain)
716 panic("Failed to add GIC IRQ domain");
717
718 gic_basic_init();
719
720 gic_ipi_init();
721 }