/*
 * GPIO driver for Marvell SoCs
 *
 * Copyright (C) 2012 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Andrew Lunn <andrew@lunn.ch>
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * This driver is a fairly straightforward GPIO driver for the
 * complete family of Marvell EBU SoC platforms (Orion, Dove,
 * Kirkwood, Discovery, Armada 370/XP). The only complexity of this
 * driver is the different register layout that exists between the
 * non-SMP platforms (Orion, Dove, Kirkwood, Armada 370) and the SMP
 * platforms (MV78200 from the Discovery family and the Armada
 * XP). Therefore, this driver handles three variants of the GPIO
 * block:
 * - the basic variant, called "orion-gpio", with the simplest
 *   register set. Used on Orion, Dove, Kirkwood, Armada 370 and
 *   non-SMP Discovery systems
 * - the mv78200 variant for MV78200 Discovery systems. This variant
 *   turns the edge mask and level mask registers into CPU0 edge
 *   mask/level mask registers, and adds CPU1 edge mask/level mask
 *   registers.
 * - the armadaxp variant for Armada XP systems. This variant keeps
 *   the normal cause/edge mask/level mask registers when the global
 *   interrupts are used, but adds per-CPU cause/edge mask/level mask
 *   registers in a separate memory area for the per-CPU GPIO
 *   interrupts.
 */
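
/*
 * Illustrative sketch only (not taken from this file or from a real board
 * file): based on the properties the probe routine below reads (the
 * compatible string, "ngpios", the "reg" ranges, the per-bank interrupts
 * and the "gpio" alias used to compute the GPIO base), a device tree node
 * for one bank is expected to look roughly like this, with the unit
 * address, register range and interrupt numbers being placeholders:
 *
 *	gpio0: gpio@18100 {
 *		compatible = "marvell,orion-gpio";
 *		reg = <0x18100 0x40>;
 *		ngpios = <32>;
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *		interrupts = <16>, <17>, <18>, <19>;
 *	};
 *
 * The authoritative binding is documented in
 * Documentation/devicetree/bindings/gpio/gpio-mvebu.txt.
 */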

#include <linux/err.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/pinctrl/consumer.h>

/*
 * GPIO unit register offsets.
 */
#define GPIO_OUT_OFF		0x0000
#define GPIO_IO_CONF_OFF	0x0004
#define GPIO_BLINK_EN_OFF	0x0008
#define GPIO_IN_POL_OFF		0x000c
#define GPIO_DATA_IN_OFF	0x0010
#define GPIO_EDGE_CAUSE_OFF	0x0014
#define GPIO_EDGE_MASK_OFF	0x0018
#define GPIO_LEVEL_MASK_OFF	0x001c

/* The MV78200 has per-CPU registers for edge mask and level mask */
#define GPIO_EDGE_MASK_MV78200_OFF(cpu)	  ((cpu) ? 0x30 : 0x18)
#define GPIO_LEVEL_MASK_MV78200_OFF(cpu)  ((cpu) ? 0x34 : 0x1C)

/* The Armada XP has per-CPU registers for interrupt cause, interrupt
 * mask and interrupt level mask. Those are relative to the
 * percpu_membase. */
#define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu) ((cpu) * 0x4)
#define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu)  (0x10 + (cpu) * 0x4)
#define GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu) (0x20 + (cpu) * 0x4)
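
/*
 * Worked example (derived from the three macros above): for CPU 1, the
 * per-CPU cause, edge mask and level mask registers live at offsets
 * 0x04, 0x14 and 0x24 from percpu_membase; for CPU 0 they sit at
 * 0x00, 0x10 and 0x20.
 */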

#define MVEBU_GPIO_SOC_VARIANT_ORION	0x1
#define MVEBU_GPIO_SOC_VARIANT_MV78200	0x2
#define MVEBU_GPIO_SOC_VARIANT_ARMADAXP	0x3

#define MVEBU_MAX_GPIO_PER_BANK		32

struct mvebu_gpio_chip {
	struct gpio_chip chip;
	spinlock_t lock;
	void __iomem *membase;
	void __iomem *percpu_membase;
	int irqbase;	/* signed so irq_alloc_descs() failures can be checked */
	struct irq_domain *domain;
	int soc_variant;
};

/*
 * Functions returning addresses of individual registers for a given
 * GPIO controller.
 */
static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_OUT_OFF;
}

static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_BLINK_EN_OFF;
}

static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_IO_CONF_OFF;
}

static inline void __iomem *mvebu_gpioreg_in_pol(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_IN_POL_OFF;
}

static inline void __iomem *mvebu_gpioreg_data_in(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_DATA_IN_OFF;
}

static inline void __iomem *mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip)
{
	int cpu;

	switch (mvchip->soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		return mvchip->membase + GPIO_EDGE_CAUSE_OFF;
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		cpu = smp_processor_id();
		return mvchip->percpu_membase +
			GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu);
	default:
		BUG();
	}
}

static inline void __iomem *mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip)
{
	int cpu;

	switch (mvchip->soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
		return mvchip->membase + GPIO_EDGE_MASK_OFF;
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		cpu = smp_processor_id();
		return mvchip->membase + GPIO_EDGE_MASK_MV78200_OFF(cpu);
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		cpu = smp_processor_id();
		return mvchip->percpu_membase +
			GPIO_EDGE_MASK_ARMADAXP_OFF(cpu);
	default:
		BUG();
	}
}

static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip)
{
	int cpu;

	switch (mvchip->soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
		return mvchip->membase + GPIO_LEVEL_MASK_OFF;
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		cpu = smp_processor_id();
		return mvchip->membase + GPIO_LEVEL_MASK_MV78200_OFF(cpu);
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		cpu = smp_processor_id();
		return mvchip->percpu_membase +
			GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu);
	default:
		BUG();
	}
}

/*
 * Functions implementing the gpio_chip methods
 */

static int mvebu_gpio_request(struct gpio_chip *chip, unsigned pin)
{
	return pinctrl_request_gpio(chip->base + pin);
}

static void mvebu_gpio_free(struct gpio_chip *chip, unsigned pin)
{
	pinctrl_free_gpio(chip->base + pin);
}

static void mvebu_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	unsigned long flags;
	u32 u;

	spin_lock_irqsave(&mvchip->lock, flags);
	u = readl_relaxed(mvebu_gpioreg_out(mvchip));
	if (value)
		u |= 1 << pin;
	else
		u &= ~(1 << pin);
	writel_relaxed(u, mvebu_gpioreg_out(mvchip));
	spin_unlock_irqrestore(&mvchip->lock, flags);
}

static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	u32 u;

	if (readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin)) {
		u = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) ^
			readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
	} else {
		u = readl_relaxed(mvebu_gpioreg_out(mvchip));
	}

	return (u >> pin) & 1;
}

static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	unsigned long flags;
	u32 u;

	spin_lock_irqsave(&mvchip->lock, flags);
	u = readl_relaxed(mvebu_gpioreg_blink(mvchip));
	if (value)
		u |= 1 << pin;
	else
		u &= ~(1 << pin);
	writel_relaxed(u, mvebu_gpioreg_blink(mvchip));
	spin_unlock_irqrestore(&mvchip->lock, flags);
}

static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	unsigned long flags;
	int ret;
	u32 u;

	/* Check with the pinctrl driver whether this pin is usable as
	 * an input GPIO */
	ret = pinctrl_gpio_direction_input(chip->base + pin);
	if (ret)
		return ret;

	spin_lock_irqsave(&mvchip->lock, flags);
	u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
	u |= 1 << pin;
	writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
	spin_unlock_irqrestore(&mvchip->lock, flags);

	return 0;
}

static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
				       int value)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	unsigned long flags;
	int ret;
	u32 u;

	/* Check with the pinctrl driver whether this pin is usable as
	 * an output GPIO */
	ret = pinctrl_gpio_direction_output(chip->base + pin);
	if (ret)
		return ret;

	mvebu_gpio_blink(chip, pin, 0);
	mvebu_gpio_set(chip, pin, value);

	spin_lock_irqsave(&mvchip->lock, flags);
	u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
	u &= ~(1 << pin);
	writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
	spin_unlock_irqrestore(&mvchip->lock, flags);

	return 0;
}

static int mvebu_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	return irq_create_mapping(mvchip->domain, pin);
}

/*
 * Functions implementing the irq_chip methods
 */
static void mvebu_gpio_irq_ack(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = ~(1 << (d->irq - gc->irq_base));

	irq_gc_lock(gc);
	writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache &= ~mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache |= mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_level_irq_mask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache &= ~mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache |= mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
	irq_gc_unlock(gc);
}

/*****************************************************************************
 * MVEBU GPIO IRQ
 *
 * GPIO_IN_POL register controls whether GPIO_DATA_IN will hold the same
 * value of the line or the opposite value.
 *
 * Level IRQ handlers: DATA_IN is used directly as cause register.
 *                     Interrupts are masked by LEVEL_MASK registers.
 * Edge IRQ handlers:  Changes in DATA_IN are latched in EDGE_CAUSE.
 *                     Interrupts are masked by EDGE_MASK registers.
 * Both-edge handlers: Similar to regular Edge handlers, but also swaps
 *                     the polarity to catch the next line transition.
 *                     This is a race condition that might not perfectly
 *                     work on some use cases.
 *
 * Every eight GPIO lines are grouped (OR'ed) before going up to main
 * cause register.
 *
 *                    EDGE  cause    mask
 *        data-in   /--------| |-----| |----\
 *     -----| |-----                         ---- to main cause reg
 *           X      \----------------| |----/
 *        polarity    LEVEL           mask
 *
 ****************************************************************************/
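
/*
 * Consumer-side sketch (not part of this driver; the GPIO number, label
 * and handler below are made-up placeholders): with the scheme above, a
 * client can take an interrupt on both edges of a line of this bank
 * through the generic gpiolib/genirq API, e.g.:
 *
 *	int irq, err;
 *
 *	err = gpio_request_one(7, GPIOF_IN, "my-line");
 *	if (err)
 *		return err;
 *	irq = gpio_to_irq(7);
 *	err = request_irq(irq, my_handler,
 *			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 *			  "my-line", NULL);
 *
 * gpio_to_irq() ends up in mvebu_gpio_to_irq() above, and the both-edge
 * case is the one where the polarity swap described here (performed in
 * mvebu_gpio_irq_handler() below) comes into play.
 */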

static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	int pin;
	u32 u;

	pin = d->hwirq;

	u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin);
	if (!u)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		return -EINVAL;

	/* Check if we need to change chip and handler */
	if (!(ct->type & type))
		if (irq_setup_alt_chip(d, type))
			return -EINVAL;

	/*
	 * Configure interrupt polarity.
	 */
	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_LEVEL_HIGH:
		u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
		u &= ~(1 << pin);
		writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
		break;
	case IRQ_TYPE_EDGE_FALLING:
	case IRQ_TYPE_LEVEL_LOW:
		u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
		u |= 1 << pin;
		writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
		break;
	case IRQ_TYPE_EDGE_BOTH: {
		u32 v;

		v = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)) ^
			readl_relaxed(mvebu_gpioreg_data_in(mvchip));

		/*
		 * set initial polarity based on current input level
		 */
		u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
		if (v & (1 << pin))
			u |= 1 << pin;		/* falling */
		else
			u &= ~(1 << pin);	/* rising */
		writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
		break;
	}
	}
	return 0;
}

static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq);
	u32 cause, type;
	int i;

	if (mvchip == NULL)
		return;

	cause = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) &
		readl_relaxed(mvebu_gpioreg_level_mask(mvchip));
	cause |= readl_relaxed(mvebu_gpioreg_edge_cause(mvchip)) &
		readl_relaxed(mvebu_gpioreg_edge_mask(mvchip));

	for (i = 0; i < mvchip->chip.ngpio; i++) {
		int irq;

		irq = mvchip->irqbase + i;

		if (!(cause & (1 << i)))
			continue;

		type = irqd_get_trigger_type(irq_get_irq_data(irq));
		if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
			/* Swap polarity (race with GPIO line) */
			u32 polarity;

			polarity = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
			polarity ^= 1 << i;
			writel_relaxed(polarity, mvebu_gpioreg_in_pol(mvchip));
		}
		generic_handle_irq(irq);
	}
}

static const struct of_device_id mvebu_gpio_of_match[] = {
	{
		.compatible = "marvell,orion-gpio",
		.data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
	},
	{
		.compatible = "marvell,mv78200-gpio",
		.data = (void *) MVEBU_GPIO_SOC_VARIANT_MV78200,
	},
	{
		.compatible = "marvell,armadaxp-gpio",
		.data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
	},
	{
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mvebu_gpio_of_match);

static int mvebu_gpio_probe(struct platform_device *pdev)
{
	struct mvebu_gpio_chip *mvchip;
	const struct of_device_id *match;
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	struct clk *clk;
	unsigned int ngpios;
	int soc_variant;
	int i, cpu, id;

	match = of_match_device(mvebu_gpio_of_match, &pdev->dev);
	if (match)
		soc_variant = (int) match->data;
	else
		soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Cannot get memory resource\n");
		return -ENODEV;
	}

	mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
			      GFP_KERNEL);
	if (!mvchip) {
		dev_err(&pdev->dev, "Cannot allocate memory\n");
		return -ENOMEM;
	}

	if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
		dev_err(&pdev->dev, "Missing ngpios OF property\n");
		return -ENODEV;
	}

	id = of_alias_get_id(pdev->dev.of_node, "gpio");
	if (id < 0) {
		dev_err(&pdev->dev, "Couldn't get OF id\n");
		return id;
	}

	clk = devm_clk_get(&pdev->dev, NULL);
	/* Not all SoCs require a clock. */
	if (!IS_ERR(clk))
		clk_prepare_enable(clk);

	mvchip->soc_variant = soc_variant;
	mvchip->chip.label = dev_name(&pdev->dev);
	mvchip->chip.dev = &pdev->dev;
	mvchip->chip.request = mvebu_gpio_request;
	mvchip->chip.free = mvebu_gpio_free;
	mvchip->chip.direction_input = mvebu_gpio_direction_input;
	mvchip->chip.get = mvebu_gpio_get;
	mvchip->chip.direction_output = mvebu_gpio_direction_output;
	mvchip->chip.set = mvebu_gpio_set;
	mvchip->chip.to_irq = mvebu_gpio_to_irq;
	mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
	mvchip->chip.ngpio = ngpios;
	mvchip->chip.can_sleep = 0;
	mvchip->chip.of_node = np;

	spin_lock_init(&mvchip->lock);
	mvchip->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mvchip->membase))
		return PTR_ERR(mvchip->membase);

	/* The Armada XP has a second range of registers for the
	 * per-CPU registers */
	if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res) {
			dev_err(&pdev->dev, "Cannot get memory resource\n");
			return -ENODEV;
		}

		mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev,
							       res);
		if (IS_ERR(mvchip->percpu_membase))
			return PTR_ERR(mvchip->percpu_membase);
	}

	/*
	 * Mask and clear GPIO interrupts.
	 */
	switch (soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF);
		break;
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
		for (cpu = 0; cpu < 2; cpu++) {
			writel_relaxed(0, mvchip->membase +
				       GPIO_EDGE_MASK_MV78200_OFF(cpu));
			writel_relaxed(0, mvchip->membase +
				       GPIO_LEVEL_MASK_MV78200_OFF(cpu));
		}
		break;
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF);
		for (cpu = 0; cpu < 4; cpu++) {
			writel_relaxed(0, mvchip->percpu_membase +
				       GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu));
			writel_relaxed(0, mvchip->percpu_membase +
				       GPIO_EDGE_MASK_ARMADAXP_OFF(cpu));
			writel_relaxed(0, mvchip->percpu_membase +
				       GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu));
		}
		break;
	default:
		BUG();
	}

	gpiochip_add(&mvchip->chip);

	/* Some gpio controllers do not provide irq support */
	if (!of_irq_count(np))
		return 0;

	/* Setup the interrupt handlers. Each chip can have up to 4
	 * interrupt handlers, with each handler dealing with 8 GPIO
	 * pins. */
	for (i = 0; i < 4; i++) {
		int irq;
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			continue;
		irq_set_handler_data(irq, mvchip);
		irq_set_chained_handler(irq, mvebu_gpio_irq_handler);
	}

	mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
	if (mvchip->irqbase < 0) {
		dev_err(&pdev->dev, "no irqs\n");
		return -ENOMEM;
	}

	gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
				    mvchip->membase, handle_level_irq);
	if (!gc) {
		dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
		return -ENOMEM;
	}

	gc->private = mvchip;
	ct = &gc->chip_types[0];
	ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
	ct->chip.irq_mask = mvebu_gpio_level_irq_mask;
	ct->chip.irq_unmask = mvebu_gpio_level_irq_unmask;
	ct->chip.irq_set_type = mvebu_gpio_irq_set_type;
	ct->chip.name = mvchip->chip.label;

	ct = &gc->chip_types[1];
	ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	ct->chip.irq_ack = mvebu_gpio_irq_ack;
	ct->chip.irq_mask = mvebu_gpio_edge_irq_mask;
	ct->chip.irq_unmask = mvebu_gpio_edge_irq_unmask;
	ct->chip.irq_set_type = mvebu_gpio_irq_set_type;
	ct->handler = handle_edge_irq;
	ct->chip.name = mvchip->chip.label;

	irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0,
			       IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);

	/* Setup irq domain on top of the generic chip. */
	mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio,
					       mvchip->irqbase,
					       &irq_domain_simple_ops,
					       mvchip);
	if (!mvchip->domain) {
		dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
			mvchip->chip.label);
		irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
					IRQ_LEVEL | IRQ_NOPROBE);
		kfree(gc);
		return -ENODEV;
	}

	return 0;
}

static struct platform_driver mvebu_gpio_driver = {
	.driver = {
		.name = "mvebu-gpio",
		.owner = THIS_MODULE,
		.of_match_table = mvebu_gpio_of_match,
	},
	.probe = mvebu_gpio_probe,
};

static int __init mvebu_gpio_init(void)
{
	return platform_driver_register(&mvebu_gpio_driver);
}
postcore_initcall(mvebu_gpio_init);