/*
 * linux/drivers/mtd/onenand/omap2.c
 *
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>

#include <asm/mach/flash.h>
#include <plat/gpmc.h>
#include <plat/onenand.h>
#include <mach/gpio.h>

#include <plat/dma.h>

#include <plat/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)

struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int *freq_ptr);
	struct regulator *regulator;
};

#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
#endif

static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

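/*
 * omap2_onenand_wait - wait for a OneNAND operation to complete
 *
 * Reset and erase preparation/verification states are polled directly.
 * For other states the interrupt line (when a GPIO IRQ is configured) is
 * waited on with a 20 ms timeout and a few retries, while reads busy-wait
 * with the interrupt output disabled.  ECC status is checked for read
 * operations and the controller status is validated before returning.
 */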
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

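/*
 * omap3_onenand_read_bufferram - copy data out of the OneNAND BufferRAM
 *
 * Large, word-aligned transfers outside interrupt context are done with the
 * system DMA controller; anything else, and any transfer whose DMA completion
 * does not arrive within 20 ms, falls back to a plain memcpy().
 */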
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

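/*
 * omap3_onenand_write_bufferram - copy data into the OneNAND BufferRAM
 *
 * Mirror image of the read path above: DMA for large aligned buffers,
 * memcpy() as the fallback.
 */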
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

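/*
 * omap2_onenand_read_bufferram - copy data out of the OneNAND BufferRAM
 *
 * The DMA path is kept for reference but is currently disabled by the
 * "if (1 || ...)" test below, so every transfer goes through memcpy().
 */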
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}

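/*
 * omap2_onenand_write_bufferram - copy data into the OneNAND BufferRAM
 *
 * As with the read path, the DMA branch is currently disabled and memcpy()
 * is used for all transfers.
 */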
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}

#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

static struct platform_driver omap2_onenand_driver;

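/*
 * Re-run the board-supplied onenand_setup() callback for every device bound
 * to this driver so that the GPMC interface timings can be re-tuned at
 * run time.
 */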
static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, &c->freq);

	return ret;
}

int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

static int omap2_onenand_enable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_enable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't enable regulator\n");

	return ret;
}

static int omap2_onenand_disable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_disable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't disable regulator\n");

	return ret;
}

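/*
 * omap2_onenand_probe - claim the GPMC chip select and I/O region, set up
 * the optional GPIO interrupt and DMA channel, grab the "vonenand" regulator
 * when the board allows it, then scan the chip and register the MTD device
 * (using command-line or platform partitions when available).
 */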
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, &c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base, c->freq);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if (pdata->regulator_can_sleep) {
		c->regulator = regulator_get(&pdev->dev, "vonenand");
		if (IS_ERR(c->regulator)) {
			dev_err(&pdev->dev, "Failed to get regulator\n");
			/* Propagate the failure instead of a stale r value */
			r = PTR_ERR(c->regulator);
			goto err_release_dma;
		}
		c->onenand.enable = omap2_onenand_enable;
		c->onenand.disable = omap2_onenand_disable;
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_regulator;

#ifdef CONFIG_MTD_PARTITIONS
	r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
	if (r > 0)
		r = add_mtd_partitions(&c->mtd, c->parts, r);
	else if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_regulator:
	regulator_put(c->regulator);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c->parts);
	kfree(c);

	return r;
}

static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	regulator_put(c->regulator);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	gpmc_cs_free(c->gpmc_cs);
	kfree(c->parts);
	kfree(c);

	return 0;
}

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= __devexit_p(omap2_onenand_remove),
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");
	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");