include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch/avr32/mach-at32ap/extint.c

/*
 * External interrupt handling for AT32AP CPUs
 *
 * Copyright (C) 2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <asm/io.h>

/* EIC register offsets */
#define EIC_IER         0x0000
#define EIC_IDR         0x0004
#define EIC_IMR         0x0008
#define EIC_ISR         0x000c
#define EIC_ICR         0x0010
#define EIC_MODE        0x0014
#define EIC_EDGE        0x0018
#define EIC_LEVEL       0x001c
#define EIC_NMIC        0x0024

/* Bitfields in NMIC */
#define EIC_NMIC_ENABLE (1 << 0)

/* Bit manipulation macros */
#define EIC_BIT(name)                                   \
        (1 << EIC_##name##_OFFSET)
#define EIC_BF(name,value)                              \
        (((value) & ((1 << EIC_##name##_SIZE) - 1))     \
         << EIC_##name##_OFFSET)
#define EIC_BFEXT(name,value)                           \
        (((value) >> EIC_##name##_OFFSET)               \
         & ((1 << EIC_##name##_SIZE) - 1))
#define EIC_BFINS(name,value,old)                       \
        (((old) & ~(((1 << EIC_##name##_SIZE) - 1)      \
                    << EIC_##name##_OFFSET))            \
         | EIC_BF(name,value))
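
/*
 * Illustrative example (no such field is defined in this file): for a
 * hypothetical field FOO with EIC_FOO_OFFSET and EIC_FOO_SIZE defined,
 * EIC_BF(FOO, 3) shifts the value 3 into the FOO field of a register
 * value and EIC_BFEXT(FOO, reg) extracts it again.
 */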

/* Register access macros */
#define eic_readl(port,reg)                             \
        __raw_readl((port)->regs + EIC_##reg)
#define eic_writel(port,reg,value)                      \
        __raw_writel((value), (port)->regs + EIC_##reg)

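/*
 * One instance per EIC block: the mapped register base, the irq_chip
 * shared by all of its lines, and the Linux IRQ number of its first
 * external line.
 */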
struct eic {
        void __iomem *regs;
        struct irq_chip *chip;
        unsigned int first_irq;
};

static struct eic *nmi_eic;
static bool nmi_enabled;

static void eic_ack_irq(unsigned int irq)
{
        struct eic *eic = get_irq_chip_data(irq);
        eic_writel(eic, ICR, 1 << (irq - eic->first_irq));
}

static void eic_mask_irq(unsigned int irq)
{
        struct eic *eic = get_irq_chip_data(irq);
        eic_writel(eic, IDR, 1 << (irq - eic->first_irq));
}

static void eic_mask_ack_irq(unsigned int irq)
{
        struct eic *eic = get_irq_chip_data(irq);
        eic_writel(eic, ICR, 1 << (irq - eic->first_irq));
        eic_writel(eic, IDR, 1 << (irq - eic->first_irq));
}

static void eic_unmask_irq(unsigned int irq)
{
        struct eic *eic = get_irq_chip_data(irq);
        eic_writel(eic, IER, 1 << (irq - eic->first_irq));
}

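/*
 * Sense encoding used below: a set bit in MODE makes the line
 * level-sensitive, a cleared bit makes it edge-sensitive; LEVEL then
 * selects high vs. low and EDGE selects rising vs. falling.
 */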
static int eic_set_irq_type(unsigned int irq, unsigned int flow_type)
{
        struct eic *eic = get_irq_chip_data(irq);
        struct irq_desc *desc;
        unsigned int i = irq - eic->first_irq;
        u32 mode, edge, level;
        int ret = 0;

        flow_type &= IRQ_TYPE_SENSE_MASK;
        if (flow_type == IRQ_TYPE_NONE)
                flow_type = IRQ_TYPE_LEVEL_LOW;

        desc = &irq_desc[irq];

        mode = eic_readl(eic, MODE);
        edge = eic_readl(eic, EDGE);
        level = eic_readl(eic, LEVEL);

        switch (flow_type) {
        case IRQ_TYPE_LEVEL_LOW:
                mode |= 1 << i;
                level &= ~(1 << i);
                break;
        case IRQ_TYPE_LEVEL_HIGH:
                mode |= 1 << i;
                level |= 1 << i;
                break;
        case IRQ_TYPE_EDGE_RISING:
                mode &= ~(1 << i);
                edge |= 1 << i;
                break;
        case IRQ_TYPE_EDGE_FALLING:
                mode &= ~(1 << i);
                edge &= ~(1 << i);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret == 0) {
                eic_writel(eic, MODE, mode);
                eic_writel(eic, EDGE, edge);
                eic_writel(eic, LEVEL, level);

                if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) {
                        flow_type |= IRQ_LEVEL;
                        __set_irq_handler_unlocked(irq, handle_level_irq);
                } else
                        __set_irq_handler_unlocked(irq, handle_edge_irq);
                desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
                desc->status |= flow_type;
        }

        return ret;
}

static struct irq_chip eic_chip = {
        .name           = "eic",
        .ack            = eic_ack_irq,
        .mask           = eic_mask_irq,
        .mask_ack       = eic_mask_ack_irq,
        .unmask         = eic_unmask_irq,
        .set_type       = eic_set_irq_type,
};

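/*
 * Chained handler for the internal IRQ this EIC block is cascaded
 * into: dispatch every pending, unmasked external line to its own
 * Linux IRQ.
 */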
static void demux_eic_irq(unsigned int irq, struct irq_desc *desc)
{
        struct eic *eic = desc->handler_data;
        unsigned long status, pending;
        unsigned int i;

        status = eic_readl(eic, ISR);
        pending = status & eic_readl(eic, IMR);

        while (pending) {
                i = fls(pending) - 1;
                pending &= ~(1 << i);

                generic_handle_irq(i + eic->first_irq);
        }
}

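/*
 * NMI control. nmi_enable() may be called before eic_probe() has run,
 * so the request is remembered in nmi_enabled and applied once the
 * first EIC block (which carries the NMI line) is probed.
 */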
int nmi_enable(void)
{
        nmi_enabled = true;

        if (nmi_eic)
                eic_writel(nmi_eic, NMIC, EIC_NMIC_ENABLE);

        return 0;
}

void nmi_disable(void)
{
        if (nmi_eic)
                eic_writel(nmi_eic, NMIC, 0);

        nmi_enabled = false;
}

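/*
 * Expects one MEM resource (the EIC register block) and one IRQ
 * resource (the internal interrupt the block is cascaded into) on the
 * "at32_eic" platform device.
 */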
static int __init eic_probe(struct platform_device *pdev)
{
        struct eic *eic;
        struct resource *regs;
        unsigned int i;
        unsigned int nr_of_irqs;
        unsigned int int_irq;
        int ret;
        u32 pattern;

        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int_irq = platform_get_irq(pdev, 0);
        if (!regs || !int_irq) {
                dev_dbg(&pdev->dev, "missing regs and/or irq resource\n");
                return -ENXIO;
        }

        ret = -ENOMEM;
        eic = kzalloc(sizeof(struct eic), GFP_KERNEL);
        if (!eic) {
                dev_dbg(&pdev->dev, "no memory for eic structure\n");
                goto err_kzalloc;
        }

        eic->first_irq = EIM_IRQ_BASE + 32 * pdev->id;
        eic->regs = ioremap(regs->start, regs->end - regs->start + 1);
        if (!eic->regs) {
                dev_dbg(&pdev->dev, "failed to map regs\n");
                goto err_ioremap;
        }

        /*
         * Find out how many interrupt lines are actually implemented
         * in hardware: enable all MODE bits and read the register
         * back; bits that correspond to unimplemented lines read back
         * as zero, so fls() of the result gives the number of lines.
         */
        eic_writel(eic, IDR, ~0UL);
        eic_writel(eic, MODE, ~0UL);
        pattern = eic_readl(eic, MODE);
        nr_of_irqs = fls(pattern);

        /* Trigger on low level unless overridden by driver */
        eic_writel(eic, EDGE, 0UL);
        eic_writel(eic, LEVEL, 0UL);

        eic->chip = &eic_chip;

        for (i = 0; i < nr_of_irqs; i++) {
                set_irq_chip_and_handler(eic->first_irq + i, &eic_chip,
                                         handle_level_irq);
                set_irq_chip_data(eic->first_irq + i, eic);
        }

        set_irq_chained_handler(int_irq, demux_eic_irq);
        set_irq_data(int_irq, eic);

        if (pdev->id == 0) {
                nmi_eic = eic;
                if (nmi_enabled)
                        /*
                         * Someone tried to enable NMI before we were
                         * ready. Do it now.
                         */
                        nmi_enable();
        }

        dev_info(&pdev->dev,
                 "External Interrupt Controller at 0x%p, IRQ %u\n",
                 eic->regs, int_irq);
        dev_info(&pdev->dev,
                 "Handling %u external IRQs, starting with IRQ %u\n",
                 nr_of_irqs, eic->first_irq);

        return 0;

err_ioremap:
        kfree(eic);
err_kzalloc:
        return ret;
}

static struct platform_driver eic_driver = {
        .driver = {
                .name = "at32_eic",
        },
};

static int __init eic_init(void)
{
        return platform_driver_probe(&eic_driver, eic_probe);
}
arch_initcall(eic_init);
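
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * peripheral driver would typically claim one of these lines through
 * the normal IRQ API, e.g.
 *
 *      err = request_irq(EIM_IRQ_BASE + 3, my_handler,
 *                        IRQF_TRIGGER_FALLING, "my-device", dev);
 *
 * where the line number, handler and cookie are made up for the
 * example; the trigger flag ends up in eic_set_irq_type() via the
 * .set_type hook of eic_chip.
 */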