Input: sur40 - skip all blobs that are not touches
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / drivers / irqchip / irq-mips-gic.c
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8 */
9 #include <linux/bitmap.h>
10 #include <linux/clocksource.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/irqchip.h>
15 #include <linux/irqchip/mips-gic.h>
16 #include <linux/of_address.h>
17 #include <linux/sched.h>
18 #include <linux/smp.h>
19
20 #include <asm/mips-cm.h>
21 #include <asm/setup.h>
22 #include <asm/traps.h>
23
24 #include <dt-bindings/interrupt-controller/mips-gic.h>
25
/* Non-zero once a GIC has been probed; consumed by arch code. */
unsigned int gic_present;

/* Per-CPU bitmap of shared interrupts currently routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

/* Physical base address of the GIC; kept for gic_get_usm_range(). */
static unsigned long __gic_base_addr;

static void __iomem *gic_base;		/* ioremapped GIC register window */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);	/* serialises routing/mask register updates */
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;		/* number of shared interrupts the GIC implements */
static int gic_vpes;			/* number of VP(E)s the GIC serves */
static unsigned int gic_cpu_pin;	/* CPU pin shared interrupts are routed to */
static unsigned int timer_cpu_pin;	/* CPU pin the local timer interrupt uses */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
/* Shared vectors reserved for IPIs, and the subset not yet allocated. */
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

static void __gic_irq_dispatch(void);
/* Read a 32-bit GIC register at byte offset @reg. */
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}
53
/* Read a 64-bit GIC register at byte offset @reg. */
static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(reg ? gic_base + reg : gic_base);
}
58
59 static inline unsigned long gic_read(unsigned int reg)
60 {
61 if (!mips_cm_is64)
62 return gic_read32(reg);
63 else
64 return gic_read64(reg);
65 }
66
/* Write @val to the 32-bit GIC register at byte offset @reg. */
static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}
71
/* Write @val to the 64-bit GIC register at byte offset @reg. */
static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}
76
77 static inline void gic_write(unsigned int reg, unsigned long val)
78 {
79 if (!mips_cm_is64)
80 return gic_write32(reg, (u32)val);
81 else
82 return gic_write64(reg, (u64)val);
83 }
84
/*
 * Read-modify-write @reg: clear the bits in @mask, then set @val.
 * Callers hold gic_lock when concurrent updates are possible.
 */
static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long tmp = gic_read(reg);

	tmp = (tmp & ~mask) | val;
	gic_write(reg, tmp);
}
95
/* Mask (disable) shared interrupt @intr via the reset-mask register. */
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}
101
/* Unmask (enable) shared interrupt @intr via the set-mask register. */
static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}
107
/* Set the polarity (GIC_POL_POS/GIC_POL_NEG) of shared interrupt @intr. */
static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}
114
/* Select edge vs level trigger (GIC_TRIG_*) for shared interrupt @intr. */
static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}
121
/* Enable/disable dual-edge triggering for shared interrupt @intr. */
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}
128
/* Route shared interrupt @intr to CPU interrupt pin @pin. */
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}
134
/* Route shared interrupt @intr to VP(E) @vpe. */
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}
141
142 #ifdef CONFIG_CLKSRC_MIPS_GIC
/*
 * Read the 64-bit shared counter. With 32-bit registers the halves are
 * read hi/lo/hi and retried until the high word is stable, so a carry
 * between the two reads cannot yield a torn value.
 */
u64 gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((u64) hi) << 32) + lo;
}
158
159 unsigned int gic_get_count_width(void)
160 {
161 unsigned int bits, config;
162
163 config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
164 bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
165 GIC_SH_CONFIG_COUNTBITS_SHF);
166
167 return bits;
168 }
169
/*
 * Program the local compare register with @cnt. With 32-bit registers
 * the high half is written first, matching the hardware's expectation.
 */
void gic_write_compare(u64 cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}
}
181
/*
 * Program @cpu's compare register with @cnt via the VPE-other window.
 * IRQs are disabled so the OTHER_ADDR selection cannot be clobbered
 * by an interrupt handler on this CPU.
 */
void gic_write_cpu_compare(u64 cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}
201
/* Read back this VPE's 64-bit compare value. */
u64 gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((u64) hi) << 32) + lo;
}
214
215 void gic_start_count(void)
216 {
217 u32 gicconfig;
218
219 /* Start the counter */
220 gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
221 gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
222 gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
223 }
224
225 void gic_stop_count(void)
226 {
227 u32 gicconfig;
228
229 /* Stop the counter */
230 gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
231 gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
232 gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
233 }
234
235 #endif
236
237 unsigned gic_read_local_vp_id(void)
238 {
239 unsigned long ident;
240
241 ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
242 return ident & GIC_VP_IDENT_VCNUM_MSK;
243 }
244
/*
 * Return true if local interrupt @intr may be routed through the GIC
 * on this VPE (per the routability bits in GIC_VPE_CTL).
 */
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		/* Other local interrupts have no routability bit. */
		return true;
	}
}
268
/*
 * EIC mode: bind hardware interrupt @irq (given as a vector number)
 * to GPR shadow set @set.
 */
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}
278
/*
 * Raise the IPI behind irq_data @d by setting its edge via the WEDGE
 * register. @cpu is unused: each IPI hwirq is pre-routed to one CPU.
 */
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}
285
/*
 * Return the Linux irq number for the CP0 count/compare timer, using
 * the GIC mapping when routable, else the raw CPU interrupt.
 */
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}
293
/*
 * Return the Linux irq number for the performance counter, or -1 if
 * it is neither GIC-routable nor has its own CPU interrupt.
 */
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
305
/*
 * Return the Linux irq number for the Fast Debug Channel, or -1 if
 * the FDC interrupt is neither GIC-routable nor present on the CPU.
 */
int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
318
319 int gic_get_usm_range(struct resource *gic_usm_res)
320 {
321 if (!gic_present)
322 return -1;
323
324 gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
325 gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);
326
327 return 0;
328 }
329
/*
 * Dispatch every pending shared interrupt that is unmasked and routed
 * to this CPU. @chained selects generic_handle_irq() (chained handler
 * context) vs do_IRQ() (top-level vector).
 */
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		/*
		 * On a 64-bit kernel with 32-bit GIC registers, each
		 * bitmap long needs a second read for its upper half.
		 */
		if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
			continue;

		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	/* Only handle interrupts that are unmasked and routed to us. */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
371
/* irq_chip .irq_mask: disable a shared interrupt. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}
376
/* irq_chip .irq_unmask: enable a shared interrupt. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}
381
/* irq_chip .irq_ack: clear an edge-triggered shared interrupt's edge. */
static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}
388
/*
 * irq_chip .irq_set_type: program polarity/trigger/dual-edge for a
 * shared interrupt and switch the flow handler + chip between the
 * level and edge variants accordingly. Unrecognised types fall back
 * to active-high level.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
440
#ifdef CONFIG_SMP
/*
 * irq_chip .irq_set_affinity: re-route a shared interrupt to the first
 * online CPU in @cpumask and update the per-CPU dispatch bitmaps.
 * The GIC routes each shared interrupt to a single VP(E), so only the
 * first CPU of the mask is honoured.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

	/* Update the pcpu_masks */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
471
/* irq_chip for level-triggered shared interrupts (no ack needed). */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};
481
/* irq_chip for edge-triggered shared interrupts; also serves IPIs. */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};
493
/*
 * Dispatch every pending, unmasked local (per-VPE) interrupt.
 * @chained selects generic_handle_irq() vs do_IRQ(), as for the
 * shared-interrupt handler.
 */
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
513
/* Mask a local interrupt on the current VPE only. */
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}
520
/* Unmask a local interrupt on the current VPE only. */
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}
527
/* irq_chip for local interrupts masked per-VPE (percpu-devid flow). */
static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};
533
/* Mask a local interrupt on every VPE, via the VPE-other window. */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
548
/* Unmask a local interrupt on every VPE, via the VPE-other window. */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
563
/* irq_chip for local interrupts that are masked on all VPEs at once. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};
569
/* Top-level (EIC vector) entry: handle local then shared interrupts. */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}
575
/* Chained-handler entry (CPU pin): handle local then shared interrupts. */
static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
581
/*
 * Put the GIC into a known state: all shared interrupts active-high
 * level-triggered and masked, and all routable local interrupts
 * masked on every VPE.
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
607
/*
 * Map a local hwirq: program the per-interrupt MAP register on every
 * VPE so the interrupt is delivered on gic_cpu_pin (the timer uses
 * timer_cpu_pin; see the CONFIG_MIPS_CMP workaround in __gic_init).
 * Returns -EPERM for non-routable interrupts, -EINVAL for unknown ones.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
665
/*
 * Map a shared hwirq: route it to gic_cpu_pin on VP(E) @vpe and make
 * @vpe the only CPU whose dispatch bitmap contains it.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
683
684 static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
685 const u32 *intspec, unsigned int intsize,
686 irq_hw_number_t *out_hwirq,
687 unsigned int *out_type)
688 {
689 if (intsize != 3)
690 return -EINVAL;
691
692 if (intspec[0] == GIC_SHARED)
693 *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
694 else if (intspec[0] == GIC_LOCAL)
695 *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
696 else
697 return -EINVAL;
698 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
699
700 return 0;
701 }
702
/*
 * Domain .map callback: attach the right irq_chip and flow handler.
 * Shared hwirqs get the level chip (set_type may flip it to edge) and
 * are initially routed to CPU 0; local hwirqs get a percpu flow.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	return gic_local_irq_domain_map(d, virq, hwirq);
}
754
/*
 * Domain .alloc callback: derive the hwirq from the fwspec and map it.
 * NOTE(review): param[0] values other than GIC_SHARED are assumed to
 * mean GIC_LOCAL here, unlike the stricter .xlate — confirm callers
 * always pass a validated fwspec.
 */
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}
768
/* Domain .free callback: nothing to undo — mappings are static. */
void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}
773
/* Main GIC irq domain: covers both local and shared hwirqs. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};
780
/* IPI domain .xlate: IPIs have no DT binding to translate. */
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
795
796 static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
797 unsigned int nr_irqs, void *arg)
798 {
799 struct cpumask *ipimask = arg;
800 irq_hw_number_t hwirq, base_hwirq;
801 int cpu, ret, i;
802
803 base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
804 if (base_hwirq == gic_shared_intrs)
805 return -ENOMEM;
806
807 /* check that we have enough space */
808 for (i = base_hwirq; i < nr_irqs; i++) {
809 if (!test_bit(i, ipi_available))
810 return -EBUSY;
811 }
812 bitmap_clear(ipi_available, base_hwirq, nr_irqs);
813
814 /* map the hwirq for each cpu consecutively */
815 i = 0;
816 for_each_cpu(cpu, ipimask) {
817 hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
818
819 ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
820 &gic_edge_irq_controller,
821 NULL);
822 if (ret)
823 goto error;
824
825 ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
826 &gic_edge_irq_controller,
827 NULL);
828 if (ret)
829 goto error;
830
831 ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
832 if (ret)
833 goto error;
834
835 ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
836 if (ret)
837 goto error;
838
839 i++;
840 }
841
842 return 0;
843 error:
844 bitmap_set(ipi_available, base_hwirq, nr_irqs);
845 return ret;
846 }
847
/*
 * IPI domain .free: return the block of @nr_irqs vectors starting at
 * the first virq's hwirq to the available pool.
 */
void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}
861
862 int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
863 enum irq_domain_bus_token bus_token)
864 {
865 bool is_ipi;
866
867 switch (bus_token) {
868 case DOMAIN_BUS_IPI:
869 is_ipi = d->bus_token == bus_token;
870 return (!node || to_of_node(d->fwnode) == node) && is_ipi;
871 break;
872 default:
873 return 0;
874 }
875 }
876
/* Hierarchical IPI domain stacked on top of the main GIC domain. */
static struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};
883
/*
 * Probe and initialise the GIC: map its registers, read the number of
 * shared interrupts and VP(E)s, hook up dispatch (EIC vector or a
 * chained CPU-pin handler), create the main and IPI irq domains, and
 * reserve vectors for IPIs before resetting the hardware to defaults.
 */
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig, cpu;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	/* NUMINTRS encodes (intrs / 8) - 1; NUMVPES encodes vpes - 1. */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
				  mips_cm_vp_id(cpu));
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
				  GIC_VPE_CTL_EIC_MODE_MSK);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");
	gic_irq_domain->name = "mips-gic-irq";

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	gic_ipi_domain->name = "mips-gic-ipi";
	gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
	gic_basic_init();
}
978
/* Legacy (non-DT) entry point; see __gic_init for the real work. */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
985
/*
 * Device-tree entry point: pick the first CPU vector not listed in
 * "mti,reserved-cpu-vectors", locate the GIC registers (DT "reg" or
 * the CM's GCR_GIC_BASE), enable the GIC in the CM, and initialise.
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);