/* irqchip: mips-gic: Clean up #includes */
/* Source: drivers/irqchip/irq-mips-gic.c (moto-9609 exynos9610 kernel tree) */
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8 */
9 #include <linux/bitmap.h>
10 #include <linux/clocksource.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/irqchip/mips-gic.h>
15 #include <linux/sched.h>
16 #include <linux/smp.h>
17
18 #include <asm/setup.h>
19 #include <asm/traps.h>
20
/* GIC counter tick rate in Hz; set by platform code. */
unsigned int gic_frequency;
/* Non-zero once a GIC has been detected. */
unsigned int gic_present;

/* Per-CPU bitmap of shared interrupts routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

/* Scratch bitmap for a snapshot of the shared pending registers. */
struct gic_pending_regs {
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
};

/* Scratch bitmap for a snapshot of the shared mask registers. */
struct gic_intrmask_regs {
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;		/* ioremapped GIC register block */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);	/* serialises GIC register updates */
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;		/* number of shared interrupts */
static int gic_vpes;			/* number of VPEs served by the GIC */
static unsigned int gic_cpu_pin;	/* CPU pin used for shared interrupts */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);
48
49 static inline unsigned int gic_read(unsigned int reg)
50 {
51 return __raw_readl(gic_base + reg);
52 }
53
54 static inline void gic_write(unsigned int reg, unsigned int val)
55 {
56 __raw_writel(val, gic_base + reg);
57 }
58
/* Read-modify-write: clear the bits in @mask, then OR in @val. */
static inline void gic_update_bits(unsigned int reg, unsigned int mask,
				   unsigned int val)
{
	unsigned int tmp = gic_read(reg);

	tmp = (tmp & ~mask) | val;
	gic_write(reg, tmp);
}
69
/* Mask (disable) shared interrupt @intr via the reset-mask register. */
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}
75
/* Unmask (enable) shared interrupt @intr via the set-mask register. */
static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}
81
/* Set the polarity (GIC_POL_POS/GIC_POL_NEG) of shared interrupt @intr. */
static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			pol << GIC_INTR_BIT(intr));
}
88
/* Select edge or level trigger (GIC_TRIG_*) for shared interrupt @intr. */
static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			trig << GIC_INTR_BIT(intr));
}
95
/* Enable/disable dual-edge triggering (GIC_TRIG_DUAL_*) for @intr. */
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1 << GIC_INTR_BIT(intr),
			dual << GIC_INTR_BIT(intr));
}
102
/* Route shared interrupt @intr to CPU interrupt pin @pin. */
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}
108
/* Route shared interrupt @intr to VPE (CPU) number @vpe. */
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}
115
#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
/*
 * Read the 64-bit GIC counter.  The counter is split across two 32-bit
 * registers, so re-read the high word until it is stable to guard
 * against the low word rolling over between the two reads.
 */
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	do {
		hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}

/*
 * Return the GIC counter width in bits, decoded from the COUNTBITS
 * field of the shared config register (32 + 4 * COUNTBITS).
 */
unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

/* Program the local VPE's 64-bit compare register. */
void gic_write_compare(cycle_t cnt)
{
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
		  (int)(cnt >> 32));
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
		  (int)(cnt & 0xffffffff));
}

/*
 * Program the compare register of another CPU through the local VPE's
 * OTHER_ADDR redirection window.  IRQs are disabled so the window
 * cannot be retargeted mid-sequence on this CPU.
 */
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
		  (int)(cnt >> 32));
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
		  (int)(cnt & 0xffffffff));

	local_irq_restore(flags);
}

/* Read back the local VPE's 64-bit compare register. */
cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}
#endif
174
/*
 * Whether local interrupt @intr can be routed through the GIC on the
 * current VPE.  In EIC mode everything is routable; otherwise consult
 * the routability bits of the per-VPE control register.
 */
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		/* Interrupts without a routability bit are always routable. */
		return true;
	}
}
198
199 unsigned int gic_get_timer_pending(void)
200 {
201 unsigned int vpe_pending;
202
203 vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
204 return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
205 }
206
/*
 * EIC mode hook: bind interrupt vector @irq to shadow register set
 * @set (0 selects the normal register set).
 */
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}
216
/*
 * Raise shared interrupt @intr as an IPI.  The top bit of the WEDGE
 * register value requests a "set"; a plain write of the interrupt
 * number (see gic_ack_irq()) clears it.
 */
void gic_send_ipi(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
}
221
222 int gic_get_c0_compare_int(void)
223 {
224 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
225 return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
226 return irq_create_mapping(gic_irq_domain,
227 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
228 }
229
/*
 * Return the Linux IRQ number to use for the performance counter
 * interrupt, or -1 if the counter shares the timer interrupt and has
 * no line of its own.
 */
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
241
/*
 * Find the lowest-numbered shared interrupt that is pending, unmasked
 * and routed to this CPU.  Returns gic_shared_intrs when none is.
 */
static unsigned int gic_get_int(void)
{
	unsigned int i;
	unsigned long *pending, *intrmask, *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;

	/* Get per-cpu bitmaps */
	pending = pending_regs[smp_processor_id()].pending;
	intrmask = intrmask_regs[smp_processor_id()].intrmask;
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	/* Snapshot pending and mask registers, 32 interrupts per word. */
	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += 0x4;
		intrmask_reg += 0x4;
	}

	/* Keep only interrupts that are unmasked and routed to us. */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	return find_first_bit(pending, gic_shared_intrs);
}
268
269 static void gic_mask_irq(struct irq_data *d)
270 {
271 gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
272 }
273
274 static void gic_unmask_irq(struct irq_data *d)
275 {
276 gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
277 }
278
279 static void gic_ack_irq(struct irq_data *d)
280 {
281 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
282
283 gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
284 }
285
/*
 * irq_chip callback: configure polarity/trigger/dual-edge for a shared
 * interrupt and switch between the edge and level irq_chip/handler
 * pairs accordingly.  Unrecognised types fall back to level/high.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	/* Edge interrupts need an ack; pick the matching chip/handler. */
	if (is_edge) {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_edge_irq_controller,
						   handle_edge_irq, NULL);
	} else {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_level_irq_controller,
						   handle_level_irq, NULL);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
340
#ifdef CONFIG_SMP
/*
 * irq_chip callback: route shared interrupt @d to a CPU in @cpumask.
 *
 * The GIC routes a shared interrupt to a single VPE, so only the first
 * online CPU of the requested mask is used.  Returns
 * IRQ_SET_MASK_OK_NOCOPY (the affinity mask is copied here) or -EINVAL
 * if no requested CPU is online.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t tmp = CPU_MASK_NONE;
	unsigned long flags;
	int i, cpu;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	cpu = cpumask_first(&tmp);
	gic_map_to_vpe(irq, cpu);

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpu].pcpu_mask);

	cpumask_copy(d->affinity, cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
371
/* irq_chip for level-triggered shared interrupts (no ack required). */
static struct irq_chip gic_level_irq_controller = {
	.name = "MIPS GIC",
	.irq_mask = gic_mask_irq,
	.irq_unmask = gic_unmask_irq,
	.irq_set_type = gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = gic_set_affinity,
#endif
};
381
/* irq_chip for edge-triggered shared interrupts (acked via WEDGE). */
static struct irq_chip gic_edge_irq_controller = {
	.name = "MIPS GIC",
	.irq_ack = gic_ack_irq,
	.irq_mask = gic_mask_irq,
	.irq_unmask = gic_unmask_irq,
	.irq_set_type = gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = gic_set_affinity,
#endif
};
392
393 static unsigned int gic_get_local_int(void)
394 {
395 unsigned long pending, masked;
396
397 pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
398 masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
399
400 bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
401
402 return find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
403 }
404
405 static void gic_mask_local_irq(struct irq_data *d)
406 {
407 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
408
409 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
410 }
411
412 static void gic_unmask_local_irq(struct irq_data *d)
413 {
414 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
415
416 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
417 }
418
/* irq_chip for per-CPU local interrupts (current VPE only). */
static struct irq_chip gic_local_irq_controller = {
	.name = "MIPS GIC Local",
	.irq_mask = gic_mask_local_irq,
	.irq_unmask = gic_unmask_local_irq,
};
424
/*
 * Mask a local interrupt on every VPE, walking them through the
 * OTHER_ADDR redirection window.  gic_lock serialises use of the
 * window against other writers.
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
438
/*
 * Unmask a local interrupt on every VPE via the OTHER_ADDR redirection
 * window; counterpart of gic_mask_local_irq_all_vpes().
 */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
452
/* irq_chip for local interrupts that are masked/unmasked on all VPEs. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name = "MIPS GIC Local",
	.irq_mask = gic_mask_local_irq_all_vpes,
	.irq_unmask = gic_unmask_local_irq_all_vpes,
};
458
/*
 * Top-level GIC dispatch: drain all pending local interrupts first,
 * then all pending shared interrupts, handing each to do_IRQ() via its
 * linear domain mapping.
 */
static void __gic_irq_dispatch(void)
{
	unsigned int intr, virq;

	while ((intr = gic_get_local_int()) != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		do_IRQ(virq);
	}

	while ((intr = gic_get_int()) != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		do_IRQ(virq);
	}
}
475
/* Chained-handler entry point: cascade from the CPU vector to the GIC. */
static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	__gic_irq_dispatch();
}
480
#ifdef CONFIG_MIPS_GIC_IPI
/* First shared interrupt of the resched / call IPI ranges. */
static int gic_resched_int_base;
static int gic_call_int_base;

/* Map a CPU number to its resched-IPI shared interrupt number. */
unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}

/* Map a CPU number to its call-IPI shared interrupt number. */
unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}

/* Handler for the scheduler-resched IPI. */
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

/* Handler for the smp_call_function IPI. */
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler = ipi_resched_interrupt,
	.flags = IRQF_PERCPU,
	.name = "IPI resched"
};

static struct irqaction irq_call = {
	.handler = ipi_call_interrupt,
	.flags = IRQF_PERCPU,
	.name = "IPI call"
};

/*
 * Set up one IPI: route shared interrupt @intr exclusively to @cpu,
 * mark it rising-edge and install @action with a per-CPU handler.
 */
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	gic_map_to_vpe(intr, cpu);
	/* Restrict this interrupt to @cpu's pcpu_mask only. */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}

/* Reserve and wire up the resched and call IPIs for every CPU. */
static __init void gic_ipi_init(void)
{
	int i;

	/* Use last 2 * NR_CPUS interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
#else
/* No IPIs without CONFIG_MIPS_GIC_IPI. */
static inline void gic_ipi_init(void)
{
}
#endif
557
/*
 * Put the GIC into a known state: all shared interrupts level/high and
 * masked, and every routable local interrupt masked on every VPE.
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	/* Mask routable local interrupts on each VPE via OTHER_ADDR. */
	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
582
/*
 * Domain .map for local interrupts: pick the right irq_chip/handler
 * pair and program the interrupt's pin mapping on every VPE.
 * Returns -EPERM for interrupts the hardware cannot route, -EINVAL for
 * unknown local interrupt numbers.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
	} else {
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
	}

	/* Route this local interrupt to gic_cpu_pin on every VPE. */
	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
			break;
		case GIC_LOCAL_INT_TIMER:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
648
/*
 * Domain .map for shared interrupts: install the level chip/handler
 * (gic_set_type() may later switch to edge) and route the interrupt to
 * the chosen CPU pin on VPE 0.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	/* Map to VPE 0 by default */
	gic_map_to_vpe(intr, 0);
	set_bit(intr, pcpu_masks[0].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
667
668 static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
669 irq_hw_number_t hw)
670 {
671 if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
672 return gic_local_irq_domain_map(d, virq, hw);
673 return gic_shared_irq_domain_map(d, virq, hw);
674 }
675
/* Domain ops: standard two-cell DT translation, custom .map above. */
static struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = irq_domain_xlate_twocell,
};
680
/*
 * Map and probe the GIC, hook it into either the EIC vector or a CPU
 * interrupt line, create the IRQ domain and bring the hardware to its
 * default state.
 *
 * @gic_base_addr:      physical base of the GIC register block
 * @gic_addrspace_size: size of that register block
 * @cpu_vec:            CPU interrupt vector the GIC cascades through
 * @irqbase:            first Linux IRQ number for the domain
 */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size, unsigned int cpu_vec,
		     unsigned int irqbase)
{
	unsigned int gicconfig;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	/* NUMINTRS encodes (shared interrupts / 8) - 1. */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
			   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	/* NUMVPES encodes the VPE count - 1. */
	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		   GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/*
		 * Always use vector 1 in EIC mode: pin 0 plus
		 * GIC_PIN_TO_VEC_OFFSET yields vector 1.
		 */
		gic_cpu_pin = 0;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		/* Cascade through a regular CPU interrupt line. */
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
	}

	gic_irq_domain = irq_domain_add_simple(NULL, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_basic_init();

	gic_ipi_init();
}