drivers: power: report battery voltage in AOSP compatible format
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / irqchip / irq-mt-gic.c
/*
 * Copied from the ARM GIC driver, with MediaTek-specific interrupt
 * control code added.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/mt-gic.h>

#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irqchip.h"
#include <mach/mt_secure_api.h>

union gic_base
{
    void __iomem *common_base;
    void __percpu __iomem **percpu_base;
};

struct gic_chip_data
{
    union gic_base dist_base;
    union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
    u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
    u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
    u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
    u32 __percpu *saved_ppi_enable;
    u32 __percpu *saved_ppi_conf;
#endif
    struct irq_domain *domain;
    unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
    void __iomem *(*get_base)(union gic_base *);
#endif
};

void (*irq_pol_workaround)(phys_addr_t addr, u32 value);

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

#ifndef MAX_GIC_NR
#define MAX_GIC_NR 1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
    return *__this_cpu_ptr(base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
    return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
    return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
    return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
                                         void __iomem *(*f)(union gic_base *))
{
    data->get_base = f;
}
#else
#define gic_data_dist_base(d) ((d)->dist_base.common_base)
#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
    struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
    return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
    struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
    return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
    return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_mask_irq(struct irq_data *d)
{
    u32 mask = 1 << (gic_irq(d) % 32);

    raw_spin_lock(&irq_controller_lock);
    writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
    raw_spin_unlock(&irq_controller_lock);
}

static void gic_unmask_irq(struct irq_data *d)
{
    u32 mask = 1 << (gic_irq(d) % 32);

    raw_spin_lock(&irq_controller_lock);
    writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
    raw_spin_unlock(&irq_controller_lock);
}

static void gic_eoi_irq(struct irq_data *d)
{
    writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}

void __iomem *GIC_DIST_BASE;
void __iomem *GIC_CPU_BASE;
void __iomem *INT_POL_CTL0;
phys_addr_t INT_POL_CTL0_phys;

__weak void mt_set_pol_reg(u32 reg_index, u32 value)
{
    writel_relaxed(value, (INT_POL_CTL0 + (reg_index * 4)));
}

void mt_irq_set_polarity(unsigned int irq, unsigned int polarity)
{
    u32 offset, reg_index, value;

    if (irq < 32)
    {
        printk(KERN_CRIT "Failed to set polarity of interrupt %d\n", irq);
        return;
    }

    offset = (irq - 32) & 0x1F;
    reg_index = (irq - 32) >> 5;

    //raw_spin_lock(&irq_controller_lock);

    if (polarity == 0)
    {
        /* active low */
        value = readl_relaxed(IOMEM(INT_POL_CTL0 + (reg_index * 4)));
        value |= (1 << offset);
        /* some platforms have to write the POL register in the secure world; use the physical address */
        mt_set_pol_reg(reg_index, value);
    }
    else
    {
        /* active high */
        value = readl_relaxed(IOMEM(INT_POL_CTL0 + (reg_index * 4)));
        value &= ~(0x1 << offset);
        /* some platforms have to write the POL register in the secure world */
        mt_set_pol_reg(reg_index, value);
    }

    //raw_spin_unlock(&irq_controller_lock);
}
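
/*
 * Layout of the MTK polarity (POL) registers, for reference: each
 * INT_POL_CTL register holds one polarity bit per SPI, 32 SPIs per
 * 32-bit register, starting at hardware interrupt 32. Illustrative
 * (hypothetical) calculation for irq 70:
 *
 *     reg_index = (70 - 32) >> 5   = 1  -> INT_POL_CTL0 + 0x4
 *     offset    = (70 - 32) & 0x1F = 6  -> bit 6 of that register
 *
 * A set bit means "active low / falling edge"; a cleared bit keeps the
 * GIC's native active-high / rising-edge sense.
 */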

static int gic_set_type(struct irq_data *d, unsigned int type)
{
    void __iomem *base = gic_dist_base(d);
    unsigned int gicirq = gic_irq(d);
    u32 enablemask = 1 << (gicirq % 32);
    u32 enableoff = (gicirq / 32) * 4;
    u32 confmask = 0x2 << ((gicirq % 16) * 2);
    u32 confoff = (gicirq / 16) * 4;
    bool enabled = false;
    u32 val;

    /* Interrupt configuration for SGIs can't be changed */
    if (gicirq < 16)
        return -EINVAL;

    //if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
    //    return -EINVAL;

    raw_spin_lock(&irq_controller_lock);

    val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
    if (type == IRQ_TYPE_LEVEL_HIGH)
        val &= ~confmask;
    else if (type == IRQ_TYPE_EDGE_RISING)
        val |= confmask;

    /*
     * As recommended by the spec, disable the interrupt before changing
     * the configuration.
     */
    if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask)
    {
        writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
        enabled = true;
    }

    writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

    if (enabled)
        writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

    /* MTK polarity setting */
    if (type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
    {
        mt_irq_set_polarity(gicirq, (type & IRQF_TRIGGER_FALLING) ? 0 : 1);
    }
    else if (type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW))
    {
        mt_irq_set_polarity(gicirq, (type & IRQF_TRIGGER_LOW) ? 0 : 1);
    }

    raw_spin_unlock(&irq_controller_lock);

    return 0;
}
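
/*
 * Usage sketch (hypothetical consumer code, not part of this driver):
 * a peripheral driver never calls gic_set_type() directly; it requests
 * a trigger type through the generic IRQ layer, and the genirq core
 * calls back into .irq_set_type above, which programs both the GIC
 * config register and the MTK polarity register:
 *
 *     ret = request_irq(irq, my_isr, IRQF_TRIGGER_FALLING,
 *                       "my-dev", my_dev);
 *
 * Here IRQF_TRIGGER_FALLING is mapped to edge configuration in
 * GIC_DIST_CONFIG plus polarity 0 (active low) in INT_POL_CTLn.
 */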

static int gic_retrigger(struct irq_data *d)
{
    /* the genirq layer expects 0 if we can't retrigger in hardware */
    return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
    void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
    unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
    u32 val, mask, bit;

    if (!force)
        cpu = cpumask_any_and(mask_val, cpu_online_mask);
    else
        cpu = cpumask_first(mask_val);

    if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
        return -EINVAL;

    mask = 0xff << shift;
    bit = gic_cpu_map[cpu] << shift;

    raw_spin_lock(&irq_controller_lock);
    val = readl_relaxed(reg) & ~mask;
    writel_relaxed(val | bit, reg);
    raw_spin_unlock(&irq_controller_lock);

    return IRQ_SET_MASK_OK;
}
#endif

#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
    int ret = -ENXIO;

    return ret;
}

#else
#define gic_set_wake NULL
#endif

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
    u32 irqstat, irqnr;
    struct gic_chip_data *gic = &gic_data[0];
    void __iomem *cpu_base = gic_data_cpu_base(gic);

    do
    {
        irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
        irqnr = irqstat & ~0x1c00;

        if (likely(irqnr > 15 && irqnr < 1021))
        {
            irqnr = irq_find_mapping(gic->domain, irqnr);
            handle_IRQ(irqnr, regs);
            continue;
        }
        if (irqnr < 16)
        {
            writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
            handle_IPI(irqnr, regs);
#endif
            continue;
        }
        break;
    }
    while (1);
}
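
/*
 * Note on the acknowledge decoding above, for reference: GIC_CPU_INTACK
 * (GICC_IAR) returns the interrupt ID in bits [9:0] and, for SGIs, the
 * source CPU ID in bits [12:10]. Masking with ~0x1c00 strips the source
 * CPU field, leaving the interrupt ID. IDs 0-15 are SGIs (IPIs), 16-31
 * are PPIs, 32-1019 are SPIs, and IDs of 1021 and above (notably the
 * 1023 "spurious" ID) terminate the loop.
 */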

static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
    struct gic_chip_data *chip_data = irq_get_handler_data(irq);
    struct irq_chip *chip = irq_get_chip(irq);
    unsigned int cascade_irq, gic_irq;
    unsigned long status;

    chained_irq_enter(chip, desc);

    raw_spin_lock(&irq_controller_lock);
    status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
    raw_spin_unlock(&irq_controller_lock);

    gic_irq = (status & 0x3ff);
    if (gic_irq == 1023)
        goto out;

    cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
    if (unlikely(gic_irq < 32 || gic_irq > 1020))
        handle_bad_irq(cascade_irq, desc);
    else
        generic_handle_irq(cascade_irq);

out:
    chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip =
{
    .name = "GIC",
    .irq_mask = gic_mask_irq,
    .irq_unmask = gic_unmask_irq,
    .irq_eoi = gic_eoi_irq,
    .irq_set_type = gic_set_type,
    .irq_retrigger = gic_retrigger,
#ifdef CONFIG_SMP
    .irq_set_affinity = gic_set_affinity,
#endif
    .irq_set_wake = gic_set_wake,
};

void __init mt_gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
    if (gic_nr >= MAX_GIC_NR)
        BUG();
    if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
        BUG();
    irq_set_chained_handler(irq, gic_handle_cascade_irq);
}

/*
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
    void __iomem *base = gic_data_dist_base(gic);
    u32 mask, i;

    for (i = mask = 0; i < 32; i += 4) {
        mask = readl_relaxed(base + GIC_DIST_TARGET + i);
        mask |= mask >> 16;
        mask |= mask >> 8;
        if (mask)
            break;
    }

    if (!mask)
        pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

    return mask;
}
*/

static void __init gic_dist_init(struct gic_chip_data *gic)
{
    unsigned int i;
    u32 cpumask;
    unsigned int gic_irqs = gic->gic_irqs;
    void __iomem *base = gic_data_dist_base(gic);

    writel_relaxed(0, base + GIC_DIST_CTRL);

    /*
     * Set all global interrupts to be level triggered, active low.
     */
    for (i = 32; i < gic_irqs; i += 16)
        writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

    /*
     * Set all global interrupts to this CPU only.
     */
    //cpumask = gic_get_cpumask(gic);
    /* FIXME */
    cpumask = 1 << smp_processor_id();
    cpumask |= cpumask << 8;
    cpumask |= cpumask << 16;
    for (i = 32; i < gic_irqs; i += 4)
        writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

    /*
     * Set priority on all global interrupts.
     */
    for (i = 32; i < gic_irqs; i += 4)
        writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

    /*
     * Disable all interrupts. Leave the PPIs and SGIs alone,
     * as these enables are banked registers.
     */
    for (i = 32; i < gic_irqs; i += 32)
        writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

    writel_relaxed(1, base + GIC_DIST_CTRL);
}
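
/*
 * For reference, the distributor register strides used above: each
 * 32-bit register covers a fixed number of interrupts, so the byte
 * offset for interrupt i is (i / irqs_per_reg) * 4, written above in
 * the equivalent form i * 4 / irqs_per_reg (equivalent because each
 * loop steps i by irqs_per_reg):
 *
 *     GIC_DIST_CONFIG      2 bits/irq  -> 16 irqs per register
 *     GIC_DIST_TARGET      8 bits/irq  ->  4 irqs per register
 *     GIC_DIST_PRI         8 bits/irq  ->  4 irqs per register
 *     GIC_DIST_ENABLE_*    1 bit/irq   -> 32 irqs per register
 */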

static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
    void __iomem *dist_base = gic_data_dist_base(gic);
    void __iomem *base = gic_data_cpu_base(gic);
    unsigned int cpu_mask, cpu = smp_processor_id();
    int i;

    /*
     * Get what the GIC says our CPU mask is.
     */
    BUG_ON(cpu >= NR_GIC_CPU_IF);
    //cpu_mask = gic_get_cpumask(gic);
    /* FIXME */
    cpu_mask = 1 << smp_processor_id();
    gic_cpu_map[cpu] = cpu_mask;

    /*
     * Clear our mask from the other map entries in case they're
     * still undefined.
     */
    for (i = 0; i < NR_GIC_CPU_IF; i++)
        if (i != cpu)
            gic_cpu_map[i] &= ~cpu_mask;

    /*
     * Deal with the banked PPI and SGI interrupts - disable all
     * PPI interrupts, ensure all SGI interrupts are enabled.
     */
    writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
    writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

    /*
     * Set priority on PPI and SGI interrupts.
     */
    for (i = 0; i < 32; i += 4)
        writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

    writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
    writel_relaxed(1, base + GIC_CPU_CTRL);
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
    unsigned int gic_irqs;
    void __iomem *dist_base;
    int i;

    if (gic_nr >= MAX_GIC_NR)
        BUG();

    gic_irqs = gic_data[gic_nr].gic_irqs;
    dist_base = gic_data_dist_base(&gic_data[gic_nr]);

    if (!dist_base)
        return;

    for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
        gic_data[gic_nr].saved_spi_conf[i] =
            readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

    for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
        gic_data[gic_nr].saved_spi_target[i] =
            readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

    for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
        gic_data[gic_nr].saved_spi_enable[i] =
            readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
    unsigned int gic_irqs;
    unsigned int i;
    void __iomem *dist_base;

    if (gic_nr >= MAX_GIC_NR)
        BUG();

    gic_irqs = gic_data[gic_nr].gic_irqs;
    dist_base = gic_data_dist_base(&gic_data[gic_nr]);

    if (!dist_base)
        return;

    writel_relaxed(0, dist_base + GIC_DIST_CTRL);

    for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
        writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
                       dist_base + GIC_DIST_CONFIG + i * 4);

    for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
        writel_relaxed(0xa0a0a0a0,
                       dist_base + GIC_DIST_PRI + i * 4);

    for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
        writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
                       dist_base + GIC_DIST_TARGET + i * 4);

    for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
        writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
                       dist_base + GIC_DIST_ENABLE_SET + i * 4);

    writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}

static void gic_cpu_save(unsigned int gic_nr)
{
    int i;
    u32 *ptr;
    void __iomem *dist_base;
    void __iomem *cpu_base;

    if (gic_nr >= MAX_GIC_NR)
        BUG();

    dist_base = gic_data_dist_base(&gic_data[gic_nr]);
    cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

    if (!dist_base || !cpu_base)
        return;

    ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
    for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
        ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

    ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
    for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
        ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

static void gic_cpu_restore(unsigned int gic_nr)
{
    int i;
    u32 *ptr;
    void __iomem *dist_base;
    void __iomem *cpu_base;

    if (gic_nr >= MAX_GIC_NR)
        BUG();

    dist_base = gic_data_dist_base(&gic_data[gic_nr]);
    cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

    if (!dist_base || !cpu_base)
        return;

    ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
    for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
        writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

    ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
    for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
        writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

    for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
        writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

    writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
    writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
    int i;

    for (i = 0; i < MAX_GIC_NR; i++)
    {
#ifdef CONFIG_GIC_NON_BANKED
        /* Skip over unused GICs */
        if (!gic_data[i].get_base)
            continue;
#endif
        switch (cmd)
        {
        case CPU_PM_ENTER:
            gic_cpu_save(i);
            break;
        case CPU_PM_ENTER_FAILED:
        case CPU_PM_EXIT:
            gic_cpu_restore(i);
            break;
        case CPU_CLUSTER_PM_ENTER:
            gic_dist_save(i);
            break;
        case CPU_CLUSTER_PM_ENTER_FAILED:
        case CPU_CLUSTER_PM_EXIT:
            gic_dist_restore(i);
            break;
        }
    }

    return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block =
{
    .notifier_call = gic_notifier,
};

static void __init gic_pm_init(struct gic_chip_data *gic)
{
    gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
                                           sizeof(u32));
    BUG_ON(!gic->saved_ppi_enable);

    gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
                                         sizeof(u32));
    BUG_ON(!gic->saved_ppi_conf);

    if (gic == &gic_data[0])
        cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif

#ifdef CONFIG_SMP
void mt_gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
    int cpu;
    unsigned long map = 0;

    /* Convert our logical CPU mask into a physical one. */
    for_each_cpu(cpu, mask)
        map |= gic_cpu_map[cpu];

    /*
     * Ensure that stores to Normal memory are visible to the
     * other CPUs before issuing the IPI.
     */
    dsb();

    /* this always happens on GIC0 */
    writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
#endif
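
/*
 * GIC_DIST_SOFTINT (GICD_SGIR) encoding used above, for reference:
 * bits [23:16] hold the target CPU interface list and bits [3:0] the
 * SGI number, so "map << 16 | irq" sends SGI <irq> to every CPU whose
 * bit is set in the physical CPU mask. Illustrative (hypothetical)
 * example: sending SGI 1 to CPUs 0 and 2 would write
 * (0x05 << 16) | 1 = 0x00050001.
 */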

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
    if (hw < 32)
    {
        irq_set_percpu_devid(irq);
        irq_set_chip_and_handler(irq, &gic_chip,
                                 handle_percpu_devid_irq);
        set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
    }
    else
    {
        irq_set_chip_and_handler(irq, &gic_chip,
                                 handle_fasteoi_irq);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
    }
    irq_set_chip_data(irq, d->host_data);
    return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d,
                                struct device_node *controller,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
{
    if (d->of_node != controller)
        return -EINVAL;
    if (intsize < 3)
        return -EINVAL;

    /* Get the interrupt number and add 16 to skip over SGIs */
    *out_hwirq = intspec[1] + 16;

    /* For SPIs, we need to add 16 more to get the GIC irq ID number */
    if (!intspec[0])
        *out_hwirq += 16;

    *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
    return 0;
}
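
/*
 * The translation above follows the standard three-cell ARM GIC
 * device-tree binding: cell 0 selects SPI (0) or PPI (1), cell 1 is
 * the interrupt number relative to that space, and cell 2 carries the
 * trigger flags. Illustrative (hypothetical) device-tree fragment:
 *
 *     my-device {
 *         interrupts = <0 38 IRQ_TYPE_LEVEL_HIGH>;
 *     };
 *
 * would map to hardware interrupt 38 + 16 + 16 = 70 (SPI 38).
 */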

void mt_gic_register_sgi(unsigned int gic_nr, int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    if (desc)
        desc->irq_data.hwirq = irq;
    irq_set_chip_and_handler(irq, &gic_chip,
                             handle_fasteoi_irq);
    set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
    irq_set_chip_data(irq, &gic_data[gic_nr]);
}

#ifdef CONFIG_SMP
static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
{
    if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
        gic_cpu_init(&gic_data[0]);
    return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block __cpuinitdata gic_cpu_notifier =
{
    .notifier_call = gic_secondary_init,
    .priority = 100,
};
#endif

const struct irq_domain_ops mt_gic_irq_domain_ops =
{
    .map = gic_irq_domain_map,
    .xlate = gic_irq_domain_xlate,
};

void __init mt_gic_init_bases(unsigned int gic_nr, int irq_start,
                              void __iomem *dist_base, void __iomem *cpu_base,
                              u32 percpu_offset, struct device_node *node)
{
    irq_hw_number_t hwirq_base;
    struct gic_chip_data *gic;
    int gic_irqs, irq_base, i;

    BUG_ON(gic_nr >= MAX_GIC_NR);

    gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
    if (percpu_offset) /* Franken-GIC without banked registers... */
    {
        unsigned int cpu;

        gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
        gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
        if (WARN_ON(!gic->dist_base.percpu_base ||
                    !gic->cpu_base.percpu_base))
        {
            free_percpu(gic->dist_base.percpu_base);
            free_percpu(gic->cpu_base.percpu_base);
            return;
        }

        for_each_possible_cpu(cpu)
        {
            unsigned long offset = percpu_offset * cpu_logical_map(cpu);
            *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
            *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
        }

        gic_set_base_accessor(gic, gic_get_percpu_base);
    }
    else
#endif
    {
        /* Normal, sane GIC... */
        WARN(percpu_offset,
             "GIC_NON_BANKED not enabled, ignoring %08x offset!",
             percpu_offset);
        gic->dist_base.common_base = dist_base;
        gic->cpu_base.common_base = cpu_base;
        gic_set_base_accessor(gic, gic_get_common_base);
    }

    /*
     * Initialize the CPU interface map to all CPUs.
     * It will be refined as each CPU probes its ID.
     */
    for (i = 0; i < NR_GIC_CPU_IF; i++)
        gic_cpu_map[i] = 0xff;

    /*
     * For primary GICs, skip over SGIs.
     * For secondary GICs, skip over PPIs, too.
     */
    if (gic_nr == 0 && (irq_start & 31) > 0)
    {
        hwirq_base = 16;
        if (irq_start != -1)
            irq_start = (irq_start & ~31) + 16;
    }
    else
    {
        hwirq_base = 32;
    }

    /*
     * Find out how many interrupts are supported.
     * The GIC only supports up to 1020 interrupt sources.
     */
    gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
    gic_irqs = (gic_irqs + 1) * 32;
    if (gic_irqs > 1020)
        gic_irqs = 1020;
    gic->gic_irqs = gic_irqs;

    gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
    irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
    if (IS_ERR_VALUE(irq_base))
    {
        WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
             irq_start);
        irq_base = irq_start;
    }
    gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
                                        hwirq_base, &mt_gic_irq_domain_ops, gic);
    if (WARN_ON(!gic->domain))
        return;

#ifdef CONFIG_SMP
    set_smp_cross_call(mt_gic_raise_softirq);
    register_cpu_notifier(&gic_cpu_notifier);
#endif

    set_handle_irq(gic_handle_irq);

    gic_dist_init(gic);
    gic_cpu_init(gic);
    gic_pm_init(gic);
}
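
/*
 * Worked example of the sizing math above: GIC_DIST_CTR's low five bits
 * hold the ITLinesNumber field, so a value of 4 means (4 + 1) * 32 = 160
 * interrupt lines. The GIC architecture caps this at 1020 usable IDs,
 * which is why the result is clamped before the irq domain is sized.
 */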


/* Special APIs for specific modules */

static spinlock_t irq_lock;

/*
 * mt_irq_mask_all: disable all interrupts
 * @mask: pointer to a struct mtk_irq_mask where the original mask values are saved.
 * Return 0 for success; return negative values for failure.
 * (This is ONLY used for the idle current measurement in factory mode.)
 */
int mt_irq_mask_all(struct mtk_irq_mask *mask)
{
    unsigned long flags;
    void __iomem *dist_base;

    dist_base = gic_data_dist_base(&gic_data[0]);

    if (mask)
    {
//#if defined(CONFIG_FIQ_GLUE)
//      local_fiq_disable();
//#endif
        spin_lock_irqsave(&irq_lock, flags);

        mask->mask0 = readl((dist_base + GIC_DIST_ENABLE_SET));
        mask->mask1 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x4));
        mask->mask2 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x8));
        mask->mask3 = readl((dist_base + GIC_DIST_ENABLE_SET + 0xC));
        mask->mask4 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x10));
        mask->mask5 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x14));
        mask->mask6 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x18));
        mask->mask7 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x1C));
        mask->mask8 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x20));

        writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR));
        writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x4));
        writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x8));
        writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0xC));
        writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x10));
        writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x14));
        writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x18));
        writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x1C));
        writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x20));
        dsb();

        spin_unlock_irqrestore(&irq_lock, flags);
//#if defined(CONFIG_FIQ_GLUE)
//      local_fiq_enable();
//#endif

        mask->header = IRQ_MASK_HEADER;
        mask->footer = IRQ_MASK_FOOTER;

        return 0;
    }
    else
    {
        return -1;
    }
}

/*
 * mt_irq_mask_restore: restore all interrupts
 * @mask: pointer to a struct mtk_irq_mask holding the mask values saved by mt_irq_mask_all.
 * Return 0 for success; return negative values for failure.
 * (This is ONLY used for the idle current measurement in factory mode.)
 */
int mt_irq_mask_restore(struct mtk_irq_mask *mask)
{
    unsigned long flags;
    void __iomem *dist_base;

    dist_base = gic_data_dist_base(&gic_data[0]);

    if (!mask)
        return -1;
    if (mask->header != IRQ_MASK_HEADER)
        return -1;
    if (mask->footer != IRQ_MASK_FOOTER)
        return -1;

//#if defined(CONFIG_FIQ_GLUE)
//  local_fiq_disable();
//#endif
    spin_lock_irqsave(&irq_lock, flags);

    writel(mask->mask0, (dist_base + GIC_DIST_ENABLE_SET));
    writel(mask->mask1, (dist_base + GIC_DIST_ENABLE_SET + 0x4));
    writel(mask->mask2, (dist_base + GIC_DIST_ENABLE_SET + 0x8));
    writel(mask->mask3, (dist_base + GIC_DIST_ENABLE_SET + 0xC));
    writel(mask->mask4, (dist_base + GIC_DIST_ENABLE_SET + 0x10));
    writel(mask->mask5, (dist_base + GIC_DIST_ENABLE_SET + 0x14));
    writel(mask->mask6, (dist_base + GIC_DIST_ENABLE_SET + 0x18));
    writel(mask->mask7, (dist_base + GIC_DIST_ENABLE_SET + 0x1C));
    writel(mask->mask8, (dist_base + GIC_DIST_ENABLE_SET + 0x20));
    dsb();

    spin_unlock_irqrestore(&irq_lock, flags);
//#if defined(CONFIG_FIQ_GLUE)
//  local_fiq_enable();
//#endif

    return 0;
}
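
/*
 * Usage sketch for the pair above (hypothetical factory-mode caller):
 *
 *     struct mtk_irq_mask mask;
 *
 *     if (mt_irq_mask_all(&mask) == 0) {
 *         measure_idle_current();    // hypothetical helper
 *         mt_irq_mask_restore(&mask);
 *     }
 *
 * The header/footer magic values let mt_irq_mask_restore() reject a
 * struct that was never filled in by mt_irq_mask_all().
 */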

/*
 * mt_irq_set_pending_for_sleep: set an interrupt pending for the sleep manager's use
 * @irq: interrupt id
 * (THIS IS ONLY FOR SLEEP FUNCTION USE. DO NOT USE IT YOURSELF!)
 */
void mt_irq_set_pending_for_sleep(unsigned int irq)
{
    void __iomem *dist_base;
    u32 mask = 1 << (irq % 32);

    dist_base = gic_data_dist_base(&gic_data[0]);

    if (irq < 16)
    {
        pr_err("Failed to set interrupt %d pending\n", irq);
        return;
    }

    *(volatile u32 *)(dist_base + GIC_DIST_PENDING_SET + irq / 32 * 4) = mask;
    pr_notice("irq:%d, 0x%p=0x%x\n", irq,
              dist_base + GIC_DIST_PENDING_SET + irq / 32 * 4, mask);
    dsb();
}

/*
 * mt_irq_unmask_for_sleep: enable an interrupt for the sleep manager's use
 * @irq: interrupt id
 * (THIS IS ONLY FOR SLEEP FUNCTION USE. DO NOT USE IT YOURSELF!)
 */
void mt_irq_unmask_for_sleep(unsigned int irq)
{
    void __iomem *dist_base;
    u32 mask = 1 << (irq % 32);

    dist_base = gic_data_dist_base(&gic_data[0]);

    if (irq < 16)
    {
        pr_err("Failed to enable interrupt %d\n", irq);
        return;
    }

    *(volatile u32 *)(dist_base + GIC_DIST_ENABLE_SET + irq / 32 * 4) = mask;
    dsb();
}

/*
 * mt_irq_mask_for_sleep: disable an interrupt for the sleep manager's use
 * @irq: interrupt id
 * (THIS IS ONLY FOR SLEEP FUNCTION USE. DO NOT USE IT YOURSELF!)
 */
void mt_irq_mask_for_sleep(unsigned int irq)
{
    void __iomem *dist_base;
    u32 mask = 1 << (irq % 32);

    dist_base = gic_data_dist_base(&gic_data[0]);

    if (irq < 16)
    {
        pr_err("Failed to disable interrupt %d\n", irq);
        return;
    }

    *(volatile u32 *)(dist_base + GIC_DIST_ENABLE_CLEAR + irq / 32 * 4) = mask;
    dsb();
}

/*
 * mt_irq_set_sens: set the interrupt sensitivity
 * @irq: interrupt id
 * @sens: sensitivity (edge or level)
 */
void mt_irq_set_sens(unsigned int irq, unsigned int sens)
{
    unsigned long flags;
    u32 config;

    if (irq < 32) {
        pr_err("Failed to set sensitivity of interrupt %d\n", irq);
        return;
    }

    spin_lock_irqsave(&irq_lock, flags);

    if (sens == MT_EDGE_SENSITIVE) {
        config = readl(GIC_DIST_BASE + GIC_DIST_CONFIG + (irq / 16) * 4);
        config |= (0x2 << (irq % 16) * 2);
        writel(config, GIC_DIST_BASE + GIC_DIST_CONFIG + (irq / 16) * 4);
    } else {
        config = readl(GIC_DIST_BASE + GIC_DIST_CONFIG + (irq / 16) * 4);
        config &= ~(0x2 << (irq % 16) * 2);
        writel(config, GIC_DIST_BASE + GIC_DIST_CONFIG + (irq / 16) * 4);
    }

    spin_unlock_irqrestore(&irq_lock, flags);

    dsb();
}
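
/*
 * Usage sketch (hypothetical platform code): configuring an MTK SPI as
 * falling-edge triggered without going through the genirq layer means
 * setting both the GIC sensitivity and the MTK polarity:
 *
 *     mt_irq_set_sens(70, MT_EDGE_SENSITIVE);
 *     mt_irq_set_polarity(70, 0);    // 0 = active low / falling edge
 *
 * Drivers should normally prefer request_irq() with IRQF_TRIGGER_*
 * flags, which reaches the same registers via gic_set_type().
 */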

void mt_irq_dump_status(int irq)
{
    int rc;
    unsigned int result;

    pr_notice("[mt gic dump] irq = %d\n", irq);

    rc = mt_secure_call(MTK_SIP_KERNEL_GIC_DUMP, irq, 0, 0);
    if (rc < 0)
    {
        pr_notice("[mt gic dump] not allowed to dump!\n");
        return;
    }

    /* get mask */
    result = rc & 0x1;
    pr_notice("[mt gic dump] enable = %d\n", result);

    /* get group */
    result = (rc >> 1) & 0x1;
    pr_notice("[mt gic dump] group = %x (0x1:irq, 0x0:fiq)\n", result);

    /* get priority */
    result = (rc >> 2) & 0xff;
    pr_notice("[mt gic dump] priority = %x\n", result);

    /* get sensitivity */
    result = (rc >> 10) & 0x3;
    pr_notice("[mt gic dump] sensitivity = %x (edge:0x1, level:0x0)\n", result >> 1);

    /* get pending status */
    result = (rc >> 11) & 0x1;
    pr_notice("[mt gic dump] pending = %x\n", result);

    /* get active status */
    result = (rc >> 12) & 0x1;
    pr_notice("[mt gic dump] active status = %x\n", result);

    /* get polarity */
    result = (rc >> 13) & 0x1;
    pr_notice("[mt gic dump] polarity = %x (0x0:high, 0x1:low)\n", result);
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;

int __init mt_gic_of_init(struct device_node *node, struct device_node *parent)
{
    void __iomem *cpu_base;
    void __iomem *dist_base;
    void __iomem *pol_base;
    u32 percpu_offset;
    int irq;
    struct resource res;

    if (WARN_ON(!node))
        return -ENODEV;

    spin_lock_init(&irq_lock);

    dist_base = of_iomap(node, 0);
    WARN(!dist_base, "unable to map gic dist registers\n");
    GIC_DIST_BASE = dist_base;

    cpu_base = of_iomap(node, 1);
    WARN(!cpu_base, "unable to map gic cpu registers\n");
    GIC_CPU_BASE = cpu_base;

    pol_base = of_iomap(node, 2);
    WARN(!pol_base, "unable to map pol registers\n");
    INT_POL_CTL0 = pol_base;

    if (of_address_to_resource(node, 2, &res))
        WARN(1, "unable to get pol register resource\n");
    INT_POL_CTL0_phys = res.start;

    if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
        percpu_offset = 0;

    mt_gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);

    if (parent)
    {
        irq = irq_of_parse_and_map(node, 0);
        mt_gic_cascade_irq(gic_cnt, irq);
    }
    gic_cnt++;

    /* FIXME: just used to test the dump API */
    //mt_irq_dump_status(160);

    return 0;
}
IRQCHIP_DECLARE(mt_gic, "mtk,mt-gic", mt_gic_of_init);

EXPORT_SYMBOL(mt_irq_dump_status);
EXPORT_SYMBOL(mt_irq_set_polarity);
EXPORT_SYMBOL(mt_irq_set_sens);

#endif