/*
 * drivers/block/mg_disk.c
 *
 * Support for the mGine m[g]flash IO mode.
 * Based on legacy hd.c
 *
 * (c) 2008 mGine Co.,LTD
 * (c) 2008 unsik Kim <donari75@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/mg_disk.h>

#define MG_RES_SEC	(CONFIG_MG_DISK_RES << 1)

static void mg_request(struct request_queue *);

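/* Decode and log the status register (and, on error, the error register). */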
static void mg_dump_status(const char *msg, unsigned int stat,
		struct mg_host *host)
{
	char *name = MG_DISK_NAME;
	struct request *req;

	if (host->breq) {
		req = elv_next_request(host->breq);
		if (req)
			name = req->rq_disk->disk_name;
	}

	printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
	if (stat & MG_REG_STATUS_BIT_BUSY)
		printk("Busy ");
	if (stat & MG_REG_STATUS_BIT_READY)
		printk("DriveReady ");
	if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
		printk("WriteFault ");
	if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
		printk("SeekComplete ");
	if (stat & MG_REG_STATUS_BIT_DATA_REQ)
		printk("DataRequest ");
	if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
		printk("CorrectedError ");
	if (stat & MG_REG_STATUS_BIT_ERROR)
		printk("Error ");
	printk("}\n");
	if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
		host->error = 0;
	} else {
		host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
		printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
				host->error & 0xff);
		if (host->error & MG_REG_ERR_BBK)
			printk("BadSector ");
		if (host->error & MG_REG_ERR_UNC)
			printk("UncorrectableError ");
		if (host->error & MG_REG_ERR_IDNF)
			printk("SectorIdNotFound ");
		if (host->error & MG_REG_ERR_ABRT)
			printk("DriveStatusError ");
		if (host->error & MG_REG_ERR_AMNF)
			printk("AddrMarkNotFound ");
		printk("}");
		if (host->error &
				(MG_REG_ERR_BBK | MG_REG_ERR_UNC |
				 MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
			if (host->breq) {
				req = elv_next_request(host->breq);
				if (req)
					printk(", sector=%ld", req->sector);
			}
		}
		printk("\n");
	}
}

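/*
 * Poll the status register until the expected status bits show up, an
 * error is reported, or the timeout (in milliseconds) expires.
 */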
static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
{
	u8 status;
	unsigned long expire, cur_jiffies;
	struct mg_drv_data *prv_data = host->dev->platform_data;

	host->error = MG_ERR_NONE;
	expire = jiffies + msecs_to_jiffies(msec);

	status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	do {
		cur_jiffies = jiffies;
		if (status & MG_REG_STATUS_BIT_BUSY) {
			if (expect == MG_REG_STATUS_BIT_BUSY)
				break;
		} else {
			/* Check the error condition! */
			if (status & MG_REG_STATUS_BIT_ERROR) {
				mg_dump_status("mg_wait", status, host);
				break;
			}

			if (expect == MG_STAT_READY)
				if (MG_READY_OK(status))
					break;

			if (expect == MG_REG_STATUS_BIT_DATA_REQ)
				if (status & MG_REG_STATUS_BIT_DATA_REQ)
					break;
		}
		if (!msec) {
			mg_dump_status("not ready", status, host);
			return MG_ERR_INV_STAT;
		}
		if (prv_data->use_polling)
			msleep(1);

		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
	} while (time_before(cur_jiffies, expire));

	if (time_after_eq(cur_jiffies, expire) && msec)
		host->error = MG_ERR_TIMEOUT;

	return host->error;
}

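/* Wait for the reset-out GPIO to go high, i.e. for the device to leave reset. */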
static unsigned int mg_wait_rstout(u32 rstout, u32 msec)
{
	unsigned long expire;

	expire = jiffies + msecs_to_jiffies(msec);
	while (time_before(jiffies, expire)) {
		if (gpio_get_value(rstout) == 1)
			return MG_ERR_NONE;
		msleep(10);
	}

	return MG_ERR_RSTOUT;
}

static void mg_unexpected_intr(struct mg_host *host)
{
	u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	mg_dump_status("mg_unexpected_intr", status, host);
}

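/* Top-level interrupt handler: dispatch to whatever handler mg_out() armed. */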
static irqreturn_t mg_irq(int irq, void *dev_id)
{
	struct mg_host *host = dev_id;
	void (*handler)(struct mg_host *) = host->mg_do_intr;

	host->mg_do_intr = NULL;
	del_timer(&host->timer);
	if (!handler)
		handler = mg_unexpected_intr;
	handler(host);
	return IRQ_HANDLED;
}

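/*
 * Issue the IDENTIFY command, read the 512-byte ID block and derive the
 * disk geometry (cylinders/heads/sectors), reserving MG_RES_SEC sectors.
 */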
static int mg_get_disk_id(struct mg_host *host)
{
	u32 i;
	s32 err;
	const u16 *id = host->id;
	struct mg_drv_data *prv_data = host->dev->platform_data;
	char fwrev[ATA_ID_FW_REV_LEN + 1];
	char model[ATA_ID_PROD_LEN + 1];
	char serial[ATA_ID_SERNO_LEN + 1];

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ);
	if (err)
		return err;

	for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++)
		host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base +
					MG_BUFF_OFFSET + i * 2));

	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD);
	if (err)
		return err;

	if ((id[ATA_ID_FIELD_VALID] & 1) == 0)
		return MG_ERR_TRANSLATION;

	host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	host->cyls = id[ATA_ID_CYLS];
	host->heads = id[ATA_ID_HEADS];
	host->sectors = id[ATA_ID_SECTORS];

	if (MG_RES_SEC && host->heads && host->sectors) {
		/* modify cyls, n_sectors */
		host->cyls = (host->n_sectors - MG_RES_SEC) /
			host->heads / host->sectors;
		host->nres_sectors = host->n_sectors - host->cyls *
			host->heads * host->sectors;
		host->n_sectors -= host->nres_sectors;
	}

	ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
	printk(KERN_INFO "mg_disk: model: %s\n", model);
	printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
	printk(KERN_INFO "mg_disk: serial: %s\n", serial);
	printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n",
			host->n_sectors, host->nres_sectors);

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	return err;
}

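/*
 * Hardware reset sequence: toggle the reset GPIO, then perform a soft
 * reset through the control register and wait for the drive to go ready.
 */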
static int mg_disk_init(struct mg_host *host)
{
	struct mg_drv_data *prv_data = host->dev->platform_data;
	s32 err;
	u8 init_status;

	/* hdd rst low */
	gpio_set_value(host->rst, 0);
	err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* hdd rst high */
	gpio_set_value(host->rst, 1);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY);
	if (err)
		return err;

	/* soft reset on */
	outb(MG_REG_CTRL_RESET |
			(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
			 MG_REG_CTRL_INTR_ENABLE),
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* soft reset off */
	outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
			MG_REG_CTRL_INTR_ENABLE,
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
	if (err)
		return err;

	init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf;

	if (init_status == 0xf)
		return MG_ERR_INIT_STAT;

	return err;
}

static void mg_bad_rw_intr(struct mg_host *host)
{
	struct request *req = elv_next_request(host->breq);
	if (req != NULL)
		if (++req->errors >= MG_MAX_ERRORS ||
				host->error == MG_ERR_TIMEOUT)
			end_request(req, 0);
}

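/*
 * Program the taskfile registers (sector count, LBA, command) and, in
 * interrupt mode, arm the completion handler and the watchdog timer.
 */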
static unsigned int mg_out(struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt,
		unsigned int cmd,
		void (*intr_addr)(struct mg_host *))
{
	struct mg_drv_data *prv_data = host->dev->platform_data;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return host->error;

	if (!prv_data->use_polling) {
		host->mg_do_intr = intr_addr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}
	if (MG_RES_SEC)
		sect_num += MG_RES_SEC;
	outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT);
	outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM);
	outb((u8)(sect_num >> 8), (unsigned long)host->dev_base +
			MG_REG_CYL_LOW);
	outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
			MG_REG_CYL_HIGH);
	outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE),
			(unsigned long)host->dev_base + MG_REG_DRV_HEAD);
	outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
	return MG_ERR_NONE;
}

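/* Polling-mode read: PIO-transfer the request sector by sector. */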
static void mg_read(struct request *req)
{
	u32 remains, j;
	struct mg_host *host = req->rq_disk->private_data;

	remains = req->nr_sectors;

	if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
			MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
			remains, req->sector, req->buffer);

	while (remains) {
		if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
				MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
			*(u16 *)req->buffer =
				inw((unsigned long)host->dev_base +
						MG_BUFF_OFFSET + (j << 1));
			req->buffer += 2;
		}

		req->sector++;
		req->errors = 0;
		remains = --req->nr_sectors;
		--req->current_nr_sectors;

		if (req->current_nr_sectors <= 0) {
			MG_DBG("remain : %d sects\n", remains);
			end_request(req, 1);
			if (remains > 0)
				req = elv_next_request(host->breq);
		}

		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
	}
}

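/* Polling-mode write: PIO-transfer the request sector by sector. */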
static void mg_write(struct request *req)
{
	u32 remains, j;
	struct mg_host *host = req->rq_disk->private_data;

	remains = req->nr_sectors;

	if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
			MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
			remains, req->sector, req->buffer);
	while (remains) {
		if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
				MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
			outw(*(u16 *)req->buffer,
					(unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (j << 1));
			req->buffer += 2;
		}
		req->sector++;
		remains = --req->nr_sectors;
		--req->current_nr_sectors;

		if (req->current_nr_sectors <= 0) {
			MG_DBG("remain : %d sects\n", remains);
			end_request(req, 1);
			if (remains > 0)
				req = elv_next_request(host->breq);
		}

		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
	}
}

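/*
 * Interrupt-mode read completion: drain one sector from the device
 * buffer, complete the segment if needed and re-arm for the next sector.
 */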
static void mg_read_intr(struct mg_host *host)
{
	u32 i;
	struct request *req;

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & MG_REG_STATUS_BIT_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if (i & MG_REG_STATUS_BIT_DATA_REQ)
			goto ok_to_read;
	} while (0);
	mg_dump_status("mg_read_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_read:
	/* get current segment of request */
	req = elv_next_request(host->breq);

	/* read 1 sector */
	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
		*(u16 *)req->buffer =
			inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
					(i << 1));
		req->buffer += 2;
	}

	/* manipulate request */
	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
			req->sector, req->nr_sectors - 1, req->buffer);

	req->sector++;
	req->errors = 0;
	i = --req->nr_sectors;
	--req->current_nr_sectors;

	/* let know if current segment done */
	if (req->current_nr_sectors <= 0)
		end_request(req, 1);

	/* set handler if read remains */
	if (i > 0) {
		host->mg_do_intr = mg_read_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* send read confirm */
	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	/* goto next request */
	if (!i)
		mg_request(host->breq);
}

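/*
 * Interrupt-mode write completion: account for the sector just written,
 * push the next sector into the device buffer and re-arm the handler.
 */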
static void mg_write_intr(struct mg_host *host)
{
	u32 i, j;
	u16 *buff;
	struct request *req;

	/* get current segment of request */
	req = elv_next_request(host->breq);

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & MG_REG_STATUS_BIT_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ))
			goto ok_to_write;
	} while (0);
	mg_dump_status("mg_write_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_write:
	/* manipulate request */
	req->sector++;
	i = --req->nr_sectors;
	--req->current_nr_sectors;
	req->buffer += MG_SECTOR_SIZE;

	/* let know if current segment or all done */
	if (!i || (req->bio && req->current_nr_sectors <= 0))
		end_request(req, 1);

	/* write 1 sector and set handler if remains */
	if (i > 0) {
		buff = (u16 *)req->buffer;
		for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
			outw(*buff, (unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (j << 1));
			buff++;
		}
		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
				req->sector, req->nr_sectors, req->buffer);
		host->mg_do_intr = mg_write_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* send write confirm */
	outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	if (!i)
		mg_request(host->breq);
}

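/* Watchdog timer callback: an expected interrupt never arrived. */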
void mg_times_out(unsigned long data)
{
	struct mg_host *host = (struct mg_host *)data;
	char *name;
	struct request *req;

	req = elv_next_request(host->breq);
	if (!req)
		return;

	host->mg_do_intr = NULL;

	name = req->rq_disk->disk_name;
	printk(KERN_DEBUG "%s: timeout\n", name);

	host->error = MG_ERR_TIMEOUT;
	mg_bad_rw_intr(host);

	mg_request(host->breq);
}

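/* Request function used when the driver runs in polling (no IRQ) mode. */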
static void mg_request_poll(struct request_queue *q)
{
	struct request *req;
	struct mg_host *host;

	while ((req = elv_next_request(q)) != NULL) {
		host = req->rq_disk->private_data;
		if (blk_fs_request(req)) {
			switch (rq_data_dir(req)) {
			case READ:
				mg_read(req);
				break;
			case WRITE:
				mg_write(req);
				break;
			default:
				printk(KERN_WARNING "%s:%d unknown command\n",
						__func__, __LINE__);
				end_request(req, 0);
				break;
			}
		}
	}
}

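/*
 * Start one request in interrupt mode: issue the command and, for writes,
 * prime the first sector before handing completion to mg_write_intr().
 */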
static unsigned int mg_issue_req(struct request *req,
		struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt)
{
	u16 *buff;
	u32 i;

	switch (rq_data_dir(req)) {
	case READ:
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		break;
	case WRITE:
		/* TODO : handler */
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		del_timer(&host->timer);
		mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);
		if (host->error) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		buff = (u16 *)req->buffer;
		for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
			outw(*buff, (unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (i << 1));
			buff++;
		}
		mod_timer(&host->timer, jiffies + 3 * HZ);
		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
		break;
	default:
		printk(KERN_WARNING "%s:%d unknown command\n",
				__func__, __LINE__);
		end_request(req, 0);
		break;
	}
	return MG_ERR_NONE;
}

/* This function is also called from IRQ context */
static void mg_request(struct request_queue *q)
{
	struct request *req;
	struct mg_host *host;
	u32 sect_num, sect_cnt;

	while (1) {
		req = elv_next_request(q);
		if (!req)
			return;

		host = req->rq_disk->private_data;

		/* check unwanted request call */
		if (host->mg_do_intr)
			return;

		del_timer(&host->timer);

		sect_num = req->sector;
		/* deal whole segments */
		sect_cnt = req->nr_sectors;

		/* sanity check */
		if (sect_num >= get_capacity(req->rq_disk) ||
				((sect_num + sect_cnt) >
				 get_capacity(req->rq_disk))) {
			printk(KERN_WARNING
					"%s: bad access: sector=%d, count=%d\n",
					req->rq_disk->disk_name,
					sect_num, sect_cnt);
			end_request(req, 0);
			continue;
		}

		if (!blk_fs_request(req))
			return;

		if (!mg_issue_req(req, host, sect_num, sect_cnt))
			return;
	}
}

static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mg_host *host = bdev->bd_disk->private_data;

	geo->cylinders = (unsigned short)host->cyls;
	geo->heads = (unsigned char)host->heads;
	geo->sectors = (unsigned char)host->sectors;
	return 0;
}

static struct block_device_operations mg_disk_ops = {
	.getgeo = mg_getgeo
};

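/* Power management: put the mflash into deep sleep with MG_CMD_SLEEP. */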
static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_DISABLE,
				(unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until the mflash enters deep sleep */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
		if (!prv_data->use_polling)
			outb(MG_REG_CTRL_INTR_ENABLE,
					(unsigned long)host->dev_base +
					MG_REG_DRV_CTRL);
		return -EIO;
	}

	return 0;
}

static int mg_resume(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until the mflash wakes up */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
				MG_REG_DRV_CTRL);

	return 0;
}

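/*
 * Platform probe: map the register window, claim the reset GPIOs and IRQ,
 * identify the disk and register it with the block layer.
 */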
static int mg_probe(struct platform_device *plat_dev)
{
	struct mg_host *host;
	struct resource *rsc;
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	int err = 0;

	if (!prv_data) {
		printk(KERN_ERR "%s:%d fail (no driver_data)\n",
				__func__, __LINE__);
		err = -EINVAL;
		goto probe_err;
	}

	/* alloc mg_host */
	host = kzalloc(sizeof(struct mg_host), GFP_KERNEL);
	if (!host) {
		printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n",
				__func__, __LINE__);
		err = -ENOMEM;
		goto probe_err;
	}
	host->major = MG_DISK_MAJ;

	/* link each other */
	prv_data->host = host;
	host->dev = &plat_dev->dev;

	/* io remap */
	rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!rsc) {
		printk(KERN_ERR "%s:%d platform_get_resource fail\n",
				__func__, __LINE__);
		err = -EINVAL;
		goto probe_err_2;
	}
	host->dev_base = ioremap(rsc->start, rsc->end - rsc->start + 1);
	if (!host->dev_base) {
		printk(KERN_ERR "%s:%d ioremap fail\n",
				__func__, __LINE__);
		err = -EIO;
		goto probe_err_2;
	}
	MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base);

	/* get reset pin */
	rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
			MG_RST_PIN);
	if (!rsc) {
		printk(KERN_ERR "%s:%d get reset pin fail\n",
				__func__, __LINE__);
		err = -EIO;
		goto probe_err_3;
	}
	host->rst = rsc->start;

	/* init rst pin */
	err = gpio_request(host->rst, MG_RST_PIN);
	if (err)
		goto probe_err_3;
	gpio_direction_output(host->rst, 1);

	/* reset out pin */
	if (!(prv_data->dev_attr & MG_DEV_MASK)) {
		err = -EINVAL;
		goto probe_err_3a;
	}

	if (prv_data->dev_attr != MG_BOOT_DEV) {
		rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
				MG_RSTOUT_PIN);
		if (!rsc) {
			printk(KERN_ERR "%s:%d get reset-out pin fail\n",
					__func__, __LINE__);
			err = -EIO;
			goto probe_err_3a;
		}
		host->rstout = rsc->start;
		err = gpio_request(host->rstout, MG_RSTOUT_PIN);
		if (err)
			goto probe_err_3a;
		gpio_direction_input(host->rstout);
	}

	/* disk reset */
	if (prv_data->dev_attr == MG_STORAGE_DEV) {
		/* if the POR sequence is not yet finished, wait for it */
		err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
		if (err)
			goto probe_err_3b;
		err = mg_disk_init(host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (err code : %d)\n",
					__func__, __LINE__, err);
			err = -EIO;
			goto probe_err_3b;
		}
	}

	/* get irq resource */
	if (!prv_data->use_polling) {
		host->irq = platform_get_irq(plat_dev, 0);
		if (host->irq == -ENXIO) {
			err = host->irq;
			goto probe_err_3b;
		}
		err = request_irq(host->irq, mg_irq,
				IRQF_DISABLED | IRQF_TRIGGER_RISING,
				MG_DEV_NAME, host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
					__func__, __LINE__, err);
			goto probe_err_3b;
		}
	}

	/* get disk id */
	err = mg_get_disk_id(host);
	if (err) {
		printk(KERN_ERR "%s:%d fail (err code : %d)\n",
				__func__, __LINE__, err);
		err = -EIO;
		goto probe_err_4;
	}

	err = register_blkdev(host->major, MG_DISK_NAME);
	if (err < 0) {
		printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n",
				__func__, __LINE__, err);
		goto probe_err_4;
	}
	if (!host->major)
		host->major = err;

	spin_lock_init(&host->lock);

	if (prv_data->use_polling)
		host->breq = blk_init_queue(mg_request_poll, &host->lock);
	else
		host->breq = blk_init_queue(mg_request, &host->lock);

	if (!host->breq) {
		err = -ENOMEM;
		printk(KERN_ERR "%s:%d (blk_init_queue) fail\n",
				__func__, __LINE__);
		goto probe_err_5;
	}

	/* mflash is a random-access device, so the noop elevator is enough */
	elevator_exit(host->breq->elevator);
	err = elevator_init(host->breq, "noop");
	if (err) {
		printk(KERN_ERR "%s:%d (elevator_init) fail\n",
				__func__, __LINE__);
		goto probe_err_6;
	}
	blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
	blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);

	init_timer(&host->timer);
	host->timer.function = mg_times_out;
	host->timer.data = (unsigned long)host;

	host->gd = alloc_disk(MG_DISK_MAX_PART);
	if (!host->gd) {
		printk(KERN_ERR "%s:%d (alloc_disk) fail\n",
				__func__, __LINE__);
		err = -ENOMEM;
		goto probe_err_7;
	}
	host->gd->major = host->major;
	host->gd->first_minor = 0;
	host->gd->fops = &mg_disk_ops;
	host->gd->queue = host->breq;
	host->gd->private_data = host;
	sprintf(host->gd->disk_name, MG_DISK_NAME"a");

	set_capacity(host->gd, host->n_sectors);

	add_disk(host->gd);

	return err;

probe_err_7:
	del_timer_sync(&host->timer);
probe_err_6:
	blk_cleanup_queue(host->breq);
probe_err_5:
	unregister_blkdev(host->major, MG_DISK_NAME);
probe_err_4:
	if (!prv_data->use_polling)
		free_irq(host->irq, host);
probe_err_3b:
	gpio_free(host->rstout);
probe_err_3a:
	gpio_free(host->rst);
probe_err_3:
	iounmap(host->dev_base);
probe_err_2:
	kfree(host);
probe_err:
	return err;
}

static int mg_remove(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;
	int err = 0;

	/* delete timer */
	del_timer_sync(&host->timer);

	/* remove disk */
	if (host->gd) {
		del_gendisk(host->gd);
		put_disk(host->gd);
	}
	/* remove queue */
	if (host->breq)
		blk_cleanup_queue(host->breq);

	/* unregister blk device */
	unregister_blkdev(host->major, MG_DISK_NAME);

	/* free irq */
	if (!prv_data->use_polling)
		free_irq(host->irq, host);

	/* free reset-out pin */
	if (prv_data->dev_attr != MG_BOOT_DEV)
		gpio_free(host->rstout);

	/* free rst pin */
	if (host->rst)
		gpio_free(host->rst);

	/* unmap io */
	if (host->dev_base)
		iounmap(host->dev_base);

	/* free mg_host */
	kfree(host);

	return err;
}

static struct platform_driver mg_disk_driver = {
	.probe = mg_probe,
	.remove = mg_remove,
	.suspend = mg_suspend,
	.resume = mg_resume,
	.driver = {
		.name = MG_DEV_NAME,
		.owner = THIS_MODULE,
	}
};

/****************************************************************************
 *
 * Module stuff
 *
 ****************************************************************************/

static int __init mg_init(void)
{
	printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n");
	return platform_driver_register(&mg_disk_driver);
}

static void __exit mg_exit(void)
{
	printk(KERN_INFO "mflash driver : bye bye\n");
	platform_driver_unregister(&mg_disk_driver);
}

module_init(mg_init);
module_exit(mg_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("unsik Kim <donari75@gmail.com>");
MODULE_DESCRIPTION("mGine m[g]flash device driver");