drivers/gpio/gpio-mvebu.c
/*
 * GPIO driver for Marvell SoCs
 *
 * Copyright (C) 2012 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Andrew Lunn <andrew@lunn.ch>
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * This driver is a fairly straightforward GPIO driver for the
 * complete family of Marvell EBU SoC platforms (Orion, Dove,
 * Kirkwood, Discovery, Armada 370/XP). The only complexity of this
 * driver is the different register layout that exists between the
 * non-SMP platforms (Orion, Dove, Kirkwood, Armada 370) and the SMP
 * platforms (MV78200 from the Discovery family and the Armada
 * XP). Therefore, this driver handles three variants of the GPIO
 * block:
 * - the basic variant, called "orion-gpio", with the simplest
 *   register set. Used on Orion, Dove, Kirkwood, Armada 370 and
 *   non-SMP Discovery systems.
 * - the mv78200 variant for MV78200 Discovery systems. This variant
 *   turns the edge mask and level mask registers into CPU0 edge
 *   mask/level mask registers, and adds CPU1 edge mask/level mask
 *   registers.
 * - the armadaxp variant for Armada XP systems. This variant keeps
 *   the normal cause/edge mask/level mask registers when the global
 *   interrupts are used, but adds per-CPU cause/edge mask/level mask
 *   registers in a separate memory area for the per-CPU GPIO
 *   interrupts.
 */
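
/*
 * Illustrative device-tree fragment (a sketch only; the unit address,
 * register offsets and interrupt numbers below are made up).  The
 * compatible string selects one of the three variants handled by this
 * driver, "ngpios" gives the number of lines in the bank, an entry
 * such as "gpio0 = &gpio0;" in /aliases supplies the bank index used
 * for the gpio_chip base, and the armadaxp variant takes a second
 * "reg" range for its per-CPU registers:
 *
 *	gpio0: gpio@18100 {
 *		compatible = "marvell,armadaxp-gpio";
 *		reg = <0x18100 0x40>, <0x181c0 0x30>;
 *		ngpios = <32>;
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		interrupts = <16>, <17>, <18>, <19>;
 *	};
 */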

#include <linux/err.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>

/*
 * GPIO unit register offsets.
 */
#define GPIO_OUT_OFF		0x0000
#define GPIO_IO_CONF_OFF	0x0004
#define GPIO_BLINK_EN_OFF	0x0008
#define GPIO_IN_POL_OFF		0x000c
#define GPIO_DATA_IN_OFF	0x0010
#define GPIO_EDGE_CAUSE_OFF	0x0014
#define GPIO_EDGE_MASK_OFF	0x0018
#define GPIO_LEVEL_MASK_OFF	0x001c

/* The MV78200 has per-CPU registers for edge mask and level mask */
#define GPIO_EDGE_MASK_MV78200_OFF(cpu)	  ((cpu) ? 0x30 : 0x18)
#define GPIO_LEVEL_MASK_MV78200_OFF(cpu)  ((cpu) ? 0x34 : 0x1C)

/* The Armada XP has per-CPU registers for interrupt cause, interrupt
 * mask and interrupt level mask. Those are relative to the
 * percpu_membase. */
#define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu) ((cpu) * 0x4)
#define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu)  (0x10 + (cpu) * 0x4)
#define GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu) (0x20 + (cpu) * 0x4)
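
/*
 * For example, plugging CPU numbers into the macros above: on the
 * Armada XP, CPU1 sees its edge cause register at offset 0x04, its
 * edge mask at 0x14 and its level mask at 0x24 inside the per-CPU
 * window, while on the MV78200 the CPU1 edge mask sits at 0x30 in
 * the main register window.
 */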

#define MVEBU_GPIO_SOC_VARIANT_ORION	0x1
#define MVEBU_GPIO_SOC_VARIANT_MV78200	0x2
#define MVEBU_GPIO_SOC_VARIANT_ARMADAXP 0x3

#define MVEBU_MAX_GPIO_PER_BANK		32

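/*
 * Per-bank driver state: membase maps the shared register window;
 * percpu_membase is only mapped for the armadaxp variant, whose
 * per-CPU cause/mask registers live in a separate window; irqbase is
 * the first Linux IRQ number backing this bank's generic irq_chip.
 */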
struct mvebu_gpio_chip {
	struct gpio_chip chip;
	spinlock_t lock;
	void __iomem *membase;
	void __iomem *percpu_membase;
	unsigned int irqbase;
	struct irq_domain *domain;
	int soc_variant;
};

/*
 * Functions returning addresses of individual registers for a given
 * GPIO controller.
 */
static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_OUT_OFF;
}

static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_BLINK_EN_OFF;
}

static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_IO_CONF_OFF;
}

static inline void __iomem *mvebu_gpioreg_in_pol(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_IN_POL_OFF;
}

static inline void __iomem *mvebu_gpioreg_data_in(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_DATA_IN_OFF;
}

static inline void __iomem *mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip)
{
	int cpu;

	switch (mvchip->soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		return mvchip->membase + GPIO_EDGE_CAUSE_OFF;
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		cpu = smp_processor_id();
		return mvchip->percpu_membase +
			GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu);
	default:
		BUG();
	}
}

static inline void __iomem *mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip)
{
	int cpu;

	switch (mvchip->soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
		return mvchip->membase + GPIO_EDGE_MASK_OFF;
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		cpu = smp_processor_id();
		return mvchip->membase + GPIO_EDGE_MASK_MV78200_OFF(cpu);
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		cpu = smp_processor_id();
		return mvchip->percpu_membase +
			GPIO_EDGE_MASK_ARMADAXP_OFF(cpu);
	default:
		BUG();
	}
}

static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip)
{
	int cpu;

	switch (mvchip->soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
		return mvchip->membase + GPIO_LEVEL_MASK_OFF;
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		cpu = smp_processor_id();
		return mvchip->membase + GPIO_LEVEL_MASK_MV78200_OFF(cpu);
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		cpu = smp_processor_id();
		return mvchip->percpu_membase +
			GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu);
	default:
		BUG();
	}
}

/*
 * Functions implementing the gpio_chip methods
 */

static int mvebu_gpio_request(struct gpio_chip *chip, unsigned pin)
{
	return pinctrl_request_gpio(chip->base + pin);
}

static void mvebu_gpio_free(struct gpio_chip *chip, unsigned pin)
{
	pinctrl_free_gpio(chip->base + pin);
}

static void mvebu_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	unsigned long flags;
	u32 u;

	spin_lock_irqsave(&mvchip->lock, flags);
	u = readl_relaxed(mvebu_gpioreg_out(mvchip));
	if (value)
		u |= 1 << pin;
	else
		u &= ~(1 << pin);
	writel_relaxed(u, mvebu_gpioreg_out(mvchip));
	spin_unlock_irqrestore(&mvchip->lock, flags);
}

static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	u32 u;

	if (readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin)) {
		u = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) ^
			readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
	} else {
		u = readl_relaxed(mvebu_gpioreg_out(mvchip));
	}

	return (u >> pin) & 1;
}

static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	unsigned long flags;
	u32 u;

	spin_lock_irqsave(&mvchip->lock, flags);
	u = readl_relaxed(mvebu_gpioreg_blink(mvchip));
	if (value)
		u |= 1 << pin;
	else
		u &= ~(1 << pin);
	writel_relaxed(u, mvebu_gpioreg_blink(mvchip));
	spin_unlock_irqrestore(&mvchip->lock, flags);
}

static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	unsigned long flags;
	int ret;
	u32 u;

	/* Check with the pinctrl driver whether this pin is usable as
	 * an input GPIO */
	ret = pinctrl_gpio_direction_input(chip->base + pin);
	if (ret)
		return ret;

	spin_lock_irqsave(&mvchip->lock, flags);
	u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
	u |= 1 << pin;
	writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
	spin_unlock_irqrestore(&mvchip->lock, flags);

	return 0;
}

static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
				       int value)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	unsigned long flags;
	int ret;
	u32 u;

	/* Check with the pinctrl driver whether this pin is usable as
	 * an output GPIO */
	ret = pinctrl_gpio_direction_output(chip->base + pin);
	if (ret)
		return ret;

	mvebu_gpio_blink(chip, pin, 0);
	mvebu_gpio_set(chip, pin, value);

	spin_lock_irqsave(&mvchip->lock, flags);
	u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
	u &= ~(1 << pin);
	writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
	spin_unlock_irqrestore(&mvchip->lock, flags);

	return 0;
}

static int mvebu_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	return irq_create_mapping(mvchip->domain, pin);
}

/*
 * Functions implementing the irq_chip methods
 */
static void mvebu_gpio_irq_ack(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = ~(1 << (d->irq - gc->irq_base));

	irq_gc_lock(gc);
	writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache &= ~mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache |= mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_level_irq_mask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache &= ~mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache |= mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
	irq_gc_unlock(gc);
}

/*****************************************************************************
 * MVEBU GPIO IRQ
 *
 * The GPIO_IN_POL register controls whether GPIO_DATA_IN holds the same
 * value as the line or the opposite value.
 *
 * Level IRQ handlers: DATA_IN is used directly as the cause register.
 *                     Interrupts are masked by the LEVEL_MASK registers.
 * Edge IRQ handlers:  Changes in DATA_IN are latched in EDGE_CAUSE.
 *                     Interrupts are masked by the EDGE_MASK registers.
 * Both-edge handlers: Similar to regular edge handlers, but also swap
 *                     the polarity to catch the next line transition.
 *                     This is a race condition that might not work
 *                     perfectly in some use cases.
 *
 * Every eight GPIO lines are grouped (OR'ed) before going up to the
 * main cause register.
 *
 *                    EDGE  cause    mask
 *        data-in   /--------| |-----| |----\
 *     -----| |-----                         ---- to main cause reg
 *           X      \----------------| |----/
 *       polarity     LEVEL          mask
 *
 ****************************************************************************/
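
/*
 * Illustrative consumer usage (a sketch, not part of this driver):
 * a device driver wanting to react to both edges on one of these
 * lines would typically map the pin to a Linux IRQ and request both
 * triggers, relying on the polarity swapping described above
 * ("my_handler" and "my_dev" are hypothetical names):
 *
 *	int irq = gpio_to_irq(gpio);
 *
 *	ret = request_irq(irq, my_handler,
 *			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 *			  "my-device", my_dev);
 */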

static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	int pin;
	u32 u;

	pin = d->hwirq;

	u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin);
	if (!u)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		return -EINVAL;

	/* Check if we need to change chip and handler */
	if (!(ct->type & type))
		if (irq_setup_alt_chip(d, type))
			return -EINVAL;

	/*
	 * Configure interrupt polarity.
	 */
	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_LEVEL_HIGH:
		u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
		u &= ~(1 << pin);
		writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
		break;
	case IRQ_TYPE_EDGE_FALLING:
	case IRQ_TYPE_LEVEL_LOW:
		u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
		u |= 1 << pin;
		writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
		break;
	case IRQ_TYPE_EDGE_BOTH: {
		u32 v;

		v = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)) ^
			readl_relaxed(mvebu_gpioreg_data_in(mvchip));

		/*
		 * set initial polarity based on current input level
		 */
		u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
		if (v & (1 << pin))
			u |= 1 << pin;		/* falling */
		else
			u &= ~(1 << pin);	/* rising */
		writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
		break;
	}
	}
	return 0;
}

static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq);
	u32 cause, type;
	int i;

	if (mvchip == NULL)
		return;

	cause = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) &
		readl_relaxed(mvebu_gpioreg_level_mask(mvchip));
	cause |= readl_relaxed(mvebu_gpioreg_edge_cause(mvchip)) &
		readl_relaxed(mvebu_gpioreg_edge_mask(mvchip));

	for (i = 0; i < mvchip->chip.ngpio; i++) {
		int irq;

		irq = mvchip->irqbase + i;

		if (!(cause & (1 << i)))
			continue;

		type = irqd_get_trigger_type(irq_get_irq_data(irq));
		if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
			/* Swap polarity (race with GPIO line) */
			u32 polarity;

			polarity = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
			polarity ^= 1 << i;
			writel_relaxed(polarity, mvebu_gpioreg_in_pol(mvchip));
		}
		generic_handle_irq(irq);
	}
}

static struct of_device_id mvebu_gpio_of_match[] = {
	{
		.compatible = "marvell,orion-gpio",
		.data	    = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
	},
	{
		.compatible = "marvell,mv78200-gpio",
		.data	    = (void *) MVEBU_GPIO_SOC_VARIANT_MV78200,
	},
	{
		.compatible = "marvell,armadaxp-gpio",
		.data	    = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
	},
	{
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mvebu_gpio_of_match);

static int mvebu_gpio_probe(struct platform_device *pdev)
{
	struct mvebu_gpio_chip *mvchip;
	const struct of_device_id *match;
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	unsigned int ngpios;
	int soc_variant;
	int i, cpu, id, irqbase;

	match = of_match_device(mvebu_gpio_of_match, &pdev->dev);
	if (match)
		soc_variant = (int) match->data;
	else
		soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Cannot get memory resource\n");
		return -ENODEV;
	}

	mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
			      GFP_KERNEL);
	if (!mvchip) {
		dev_err(&pdev->dev, "Cannot allocate memory\n");
		return -ENOMEM;
	}

	if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
		dev_err(&pdev->dev, "Missing ngpios OF property\n");
		return -ENODEV;
	}

	id = of_alias_get_id(pdev->dev.of_node, "gpio");
	if (id < 0) {
		dev_err(&pdev->dev, "Couldn't get OF id\n");
		return id;
	}

	mvchip->soc_variant = soc_variant;
	mvchip->chip.label = dev_name(&pdev->dev);
	mvchip->chip.dev = &pdev->dev;
	mvchip->chip.request = mvebu_gpio_request;
	mvchip->chip.free = mvebu_gpio_free;
	mvchip->chip.direction_input = mvebu_gpio_direction_input;
	mvchip->chip.get = mvebu_gpio_get;
	mvchip->chip.direction_output = mvebu_gpio_direction_output;
	mvchip->chip.set = mvebu_gpio_set;
	mvchip->chip.to_irq = mvebu_gpio_to_irq;
	mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
	mvchip->chip.ngpio = ngpios;
	mvchip->chip.can_sleep = 0;
	mvchip->chip.of_node = np;

	spin_lock_init(&mvchip->lock);
	mvchip->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mvchip->membase))
		return PTR_ERR(mvchip->membase);

	/* The Armada XP has a second range of registers for the
	 * per-CPU registers */
	if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res) {
			dev_err(&pdev->dev, "Cannot get memory resource\n");
			return -ENODEV;
		}

		mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev,
							       res);
		if (IS_ERR(mvchip->percpu_membase))
			return PTR_ERR(mvchip->percpu_membase);
	}

	/*
	 * Mask and clear GPIO interrupts.
	 */
	switch (soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF);
		break;
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
		for (cpu = 0; cpu < 2; cpu++) {
			writel_relaxed(0, mvchip->membase +
				       GPIO_EDGE_MASK_MV78200_OFF(cpu));
			writel_relaxed(0, mvchip->membase +
				       GPIO_LEVEL_MASK_MV78200_OFF(cpu));
		}
		break;
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF);
		for (cpu = 0; cpu < 4; cpu++) {
			writel_relaxed(0, mvchip->percpu_membase +
				       GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu));
			writel_relaxed(0, mvchip->percpu_membase +
				       GPIO_EDGE_MASK_ARMADAXP_OFF(cpu));
			writel_relaxed(0, mvchip->percpu_membase +
				       GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu));
		}
		break;
	default:
		BUG();
	}

	gpiochip_add(&mvchip->chip);

	/* Some gpio controllers do not provide irq support */
	if (!of_irq_count(np))
		return 0;

	/* Setup the interrupt handlers. Each chip can have up to 4
	 * interrupt handlers, with each handler dealing with 8 GPIO
	 * pins. */
	for (i = 0; i < 4; i++) {
		int irq;
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			continue;
		irq_set_handler_data(irq, mvchip);
		irq_set_chained_handler(irq, mvebu_gpio_irq_handler);
	}

	/* Keep the result in a signed variable so a failed allocation
	 * is actually detected before storing it in the unsigned
	 * mvchip->irqbase. */
	irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
	if (irqbase < 0) {
		dev_err(&pdev->dev, "no irqs\n");
		return -ENOMEM;
	}
	mvchip->irqbase = irqbase;

	gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
				    mvchip->membase, handle_level_irq);
	if (!gc) {
		dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
		return -ENOMEM;
	}

	gc->private = mvchip;
	ct = &gc->chip_types[0];
	ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
	ct->chip.irq_mask = mvebu_gpio_level_irq_mask;
	ct->chip.irq_unmask = mvebu_gpio_level_irq_unmask;
	ct->chip.irq_set_type = mvebu_gpio_irq_set_type;
	ct->chip.name = mvchip->chip.label;

	ct = &gc->chip_types[1];
	ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	ct->chip.irq_ack = mvebu_gpio_irq_ack;
	ct->chip.irq_mask = mvebu_gpio_edge_irq_mask;
	ct->chip.irq_unmask = mvebu_gpio_edge_irq_unmask;
	ct->chip.irq_set_type = mvebu_gpio_irq_set_type;
	ct->handler = handle_edge_irq;
	ct->chip.name = mvchip->chip.label;

	irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0,
			       IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);

	/* Setup irq domain on top of the generic chip. */
	mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio,
					       mvchip->irqbase,
					       &irq_domain_simple_ops,
					       mvchip);
	if (!mvchip->domain) {
		dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
			mvchip->chip.label);
		irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
					IRQ_LEVEL | IRQ_NOPROBE);
		kfree(gc);
		return -ENODEV;
	}

	return 0;
}

static struct platform_driver mvebu_gpio_driver = {
	.driver = {
		.name		= "mvebu-gpio",
		.owner		= THIS_MODULE,
		.of_match_table = mvebu_gpio_of_match,
	},
	.probe		= mvebu_gpio_probe,
};

static int __init mvebu_gpio_init(void)
{
	return platform_driver_register(&mvebu_gpio_driver);
}
postcore_initcall(mvebu_gpio_init);