1/******************************************************************************
2* mtk_nand.c - MTK NAND Flash Device Driver
3 *
4* Copyright 2009-2012 MediaTek Co.,Ltd.
5 *
6* DESCRIPTION:
7* This file provides NAND-related functions for other drivers
8 *
9* modification history
10* ----------------------------------------
11* v3.0, 11 Feb 2010, mtk
12* ----------------------------------------
13******************************************************************************/
14
15#include <linux/slab.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/delay.h>
19#include <linux/errno.h>
20#include <linux/sched.h>
21#include <linux/types.h>
22#include <linux/wait.h>
23#include <linux/spinlock.h>
24#include <linux/interrupt.h>
25#include <linux/mtd/mtd.h>
26#include <linux/mtd/nand.h>
27#include <linux/mtd/partitions.h>
28#include <linux/mtd/nand_ecc.h>
29#include <linux/dma-mapping.h>
30#include <linux/jiffies.h>
31#include <linux/platform_device.h>
32#include <linux/proc_fs.h>
33#include <linux/seq_file.h>
34#include <linux/time.h>
35#include <linux/mm.h>
36#include <linux/xlog.h>
37#include <asm/io.h>
38#include <asm/cacheflush.h>
39#include <asm/uaccess.h>
40#include <linux/miscdevice.h>
41#include <mach/mtk_nand.h>
42#include <mach/dma.h>
43#include <mach/devs.h>
44#include <mach/mt_reg_base.h>
45#include <mach/mt_typedefs.h>
46#include <mach/mt_clkmgr.h>
47#include <mach/mtk_nand.h>
48#include <mach/bmt.h>
49#include <mach/mt_irq.h>
50//#include "partition.h"
51#include <asm/system.h>
52#include "partition_define.h"
53#include <mach/mt_boot.h>
54//#include "../../../../../../source/kernel/drivers/aee/ipanic/ipanic.h"
55#include <linux/rtc.h>
56#include <mach/mt_gpio.h>
57#include <mach/mt_pm_ldo.h>
58#ifdef CONFIG_PWR_LOSS_MTK_SPOH
59#include <mach/power_loss_test.h>
60#endif
61#include <mach/nand_device_define.h>
62
63#define VERSION "v2.1 Fix AHB virt2phys error"
64#define MODULE_NAME "# MTK NAND #"
65#define PROCNAME "driver/nand"
66#define PMT 1
67#define _MTK_NAND_DUMMY_DRIVER_
68#define __INTERNAL_USE_AHB_MODE__ (1)
69#define CFG_FPGA_PLATFORM (0) // for fpga by bean
70#define CFG_RANDOMIZER (1) // for randomizer code
71#define CFG_PERFLOG_DEBUG (0) // for performance log
72#define CFG_2CS_NAND (1) // for 2CS nand
73#define CFG_COMBO_NAND (1) // for Combo nand
74
75#define NFI_TRICKY_CS (1) // must be 1 or > 1?
76
77#define PERI_NFI_CLK_SOURCE_SEL ((volatile P_U32)(PERICFG_BASE+0x424))
78#define PERI_NFI_MAC_CTRL ((volatile P_U32)(PERICFG_BASE+0x428))
79#define NFI_PAD_1X_CLOCK (1) //nfi1X
80
81void show_stack(struct task_struct *tsk, unsigned long *sp);
82extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
83extern void mt_irq_set_polarity(unsigned int irq,unsigned int polarity);
84
85extern struct mtd_partition g_pasStatic_Partition[PART_MAX_COUNT];
86
87#if defined(MTK_MLC_NAND_SUPPORT)
88bool MLC_DEVICE = TRUE;// to build pass xiaolei
89#endif
90
91#if defined(NAND_OTP_SUPPORT)
92
93#define SAMSUNG_OTP_SUPPORT 1
94#define OTP_MAGIC_NUM 0x4E3AF28B
95#define SAMSUNG_OTP_PAGE_NUM 6
96
97static const unsigned int Samsung_OTP_Page[SAMSUNG_OTP_PAGE_NUM] = { 0x15, 0x16, 0x17, 0x18, 0x19, 0x1b };
98
99static struct mtk_otp_config g_mtk_otp_fuc;
100static spinlock_t g_OTPLock;
101
102#define OTP_MAGIC 'k'
103
104/* NAND OTP IO control number */
105#define OTP_GET_LENGTH _IOW(OTP_MAGIC, 1, int)
106#define OTP_READ _IOW(OTP_MAGIC, 2, int)
107#define OTP_WRITE _IOW(OTP_MAGIC, 3, int)
108
109#define FS_OTP_READ 0
110#define FS_OTP_WRITE 1
111
112/* NAND OTP Error codes */
113#define OTP_SUCCESS 0
114#define OTP_ERROR_OVERSCOPE -1
115#define OTP_ERROR_TIMEOUT -2
116#define OTP_ERROR_BUSY -3
117#define OTP_ERROR_NOMEM -4
118#define OTP_ERROR_RESET -5
119
120struct mtk_otp_config
121{
122 u32(*OTPRead) (u32 PageAddr, void *BufferPtr, void *SparePtr);
123 u32(*OTPWrite) (u32 PageAddr, void *BufferPtr, void *SparePtr);
124 u32(*OTPQueryLength) (u32 * Length);
125};
126
127struct otp_ctl
128{
129 unsigned int QLength;
130 unsigned int Offset;
131 unsigned int Length;
132 char *BufferPtr;
133 unsigned int status;
134};
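/*
 * Rough sketch of the expected ioctl usage (the dispatch code lives elsewhere
 * in this driver): userspace fills a struct otp_ctl; OTP_GET_LENGTH is
 * presumably answered through OTPQueryLength() and returns the OTP size in
 * QLength, while OTP_READ/OTP_WRITE use Offset/Length/BufferPtr and report
 * the result in status via the OTPRead()/OTPWrite() callbacks above.
 */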
135#endif
136
137#define ERR_RTN_SUCCESS 1
138#define ERR_RTN_FAIL 0
139#define ERR_RTN_BCH_FAIL -1
140
141#define NFI_SET_REG32(reg, value) \
142do { \
143 g_value = (DRV_Reg32(reg) | (value));\
144 DRV_WriteReg32(reg, g_value); \
145} while(0)
146
147#define NFI_SET_REG16(reg, value) \
148do { \
149 g_value = (DRV_Reg16(reg) | (value));\
150 DRV_WriteReg16(reg, g_value); \
151} while(0)
152
153#define NFI_CLN_REG32(reg, value) \
154do { \
155 g_value = (DRV_Reg32(reg) & (~(value)));\
156 DRV_WriteReg32(reg, g_value); \
157} while(0)
158
159#define NFI_CLN_REG16(reg, value) \
160do { \
161 g_value = (DRV_Reg16(reg) & (~(value)));\
162 DRV_WriteReg16(reg, g_value); \
163} while(0)
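/*
 * The four macros above are read-modify-write helpers for 16/32-bit NFI and
 * ECC registers. They all stage the intermediate value in the single global
 * g_value, so they are not reentrant on their own.
 */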
164
165#define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state)
166#define NFI_WAIT_TO_READY() do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY))
167#define FIFO_PIO_READY(x) (0x1 & x)
168#define WAIT_NFI_PIO_READY(timeout) \
169 do {\
170 while( (!FIFO_PIO_READY(DRV_Reg(NFI_PIO_DIRDY_REG16))) && (--timeout) );\
171 } while(0);
172
173
174#define NAND_SECTOR_SIZE (512)
175#define OOB_PER_SECTOR (16)
176#define OOB_AVAI_PER_SECTOR (8)
177
178#if defined(MTK_COMBO_NAND_SUPPORT)
179 // BMT_POOL_SIZE is not used anymore
180#else
181 #ifndef PART_SIZE_BMTPOOL
182 #define BMT_POOL_SIZE (80)
183 #else
184 #define BMT_POOL_SIZE (PART_SIZE_BMTPOOL)
185 #endif
186#endif
187u8 ecc_threshold;
188#define PMT_POOL_SIZE (2)
189/*******************************************************************************
 190 * Global Variable Definition
191 *******************************************************************************/
192#if CFG_PERFLOG_DEBUG
193struct nand_perf_log
194{
195 unsigned int ReadPageCount;
196 suseconds_t ReadPageTotalTime;
197 unsigned int ReadBusyCount;
198 suseconds_t ReadBusyTotalTime;
199 unsigned int ReadDMACount;
200 suseconds_t ReadDMATotalTime;
201
202 unsigned int ReadSubPageCount;
203 suseconds_t ReadSubPageTotalTime;
204
205 unsigned int WritePageCount;
206 suseconds_t WritePageTotalTime;
207 unsigned int WriteBusyCount;
208 suseconds_t WriteBusyTotalTime;
209 unsigned int WriteDMACount;
210 suseconds_t WriteDMATotalTime;
211
212 unsigned int EraseBlockCount;
213 suseconds_t EraseBlockTotalTime;
214
215};
216#endif
217#ifdef PWR_LOSS_SPOH
218
219#define PL_TIME_RAND_PROG(chip, page_addr, time) do { \
220 if(host->pl.nand_program_wdt_enable == 1){ \
221 PL_TIME_RAND(page_addr, time, host->pl.last_prog_time);} \
222 else \
223 time=0; \
224 } while(0)
225
226#define PL_TIME_RAND_ERASE(chip, page_addr, time) do { \
227 if(host->pl.nand_erase_wdt_enable == 1){ \
228 PL_TIME_RAND(page_addr, time, host->pl.last_erase_time); \
229 if(time != 0) \
230 printk(KERN_ERR "[MVG_TEST]: Erase reset in %d us\n", time);} \
231 else \
232 time=0; \
233 } while(0)
234
235#define PL_TIME_PROG(duration) do { \
236 host->pl.last_prog_time = duration; \
237 } while(0)
238
239#define PL_TIME_ERASE(duration) do { \
240 host->pl.last_erase_time = duration; \
241 } while(0)
242
243
244#define PL_TIME_PROG_WDT_SET(WDT) do { \
245 host->pl.nand_program_wdt_enable = WDT; \
246 } while(0)
247
248#define PL_TIME_ERASE_WDT_SET(WDT) do { \
249 host->pl.nand_erase_wdt_enable = WDT; \
250 } while(0)
251
252#define PL_NAND_BEGIN(time) PL_BEGIN(time)
253
254#define PL_NAND_RESET(time) PL_RESET(time)
255
256#define PL_NAND_END(pl_time_write, duration) PL_END(pl_time_write, duration)
257
258
259#else
260
261#define PL_TIME_RAND_PROG(chip, page_addr, time)
262#define PL_TIME_RAND_ERASE(chip, page_addr, time)
263
264#define PL_TIME_PROG(duration)
265#define PL_TIME_ERASE(duration)
266
267#define PL_TIME_PROG_WDT_SET(WDT)
268#define PL_TIME_ERASE_WDT_SET(WDT)
269
270#define PL_NAND_BEGIN(time)
271#define PL_NAND_RESET(time)
272#define PL_NAND_END(pl_time_write, duration)
273
274#endif
275
276#if CFG_PERFLOG_DEBUG
277static struct nand_perf_log g_NandPerfLog={0};
278static struct timeval g_NandLogTimer={0};
279#endif
280
281#ifdef NAND_PFM
282static suseconds_t g_PFM_R = 0;
283static suseconds_t g_PFM_W = 0;
284static suseconds_t g_PFM_E = 0;
285static u32 g_PFM_RNum = 0;
286static u32 g_PFM_RD = 0;
287static u32 g_PFM_WD = 0;
288static struct timeval g_now;
289
290#define PFM_BEGIN(time) \
291do_gettimeofday(&g_now); \
292(time) = g_now;
293
294#define PFM_END_R(time, n) \
295do_gettimeofday(&g_now); \
296g_PFM_R += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
297g_PFM_RNum += 1; \
298g_PFM_RD += n; \
299MSG(PERFORMANCE, "%s - Read PFM: %lu, data: %d, ReadOOB: %d (%d, %d)\n", MODULE_NAME , g_PFM_R, g_PFM_RD, g_kCMD.pureReadOOB, g_kCMD.pureReadOOBNum, g_PFM_RNum);
300
301#define PFM_END_W(time, n) \
302do_gettimeofday(&g_now); \
303g_PFM_W += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
304g_PFM_WD += n; \
305MSG(PERFORMANCE, "%s - Write PFM: %lu, data: %d\n", MODULE_NAME, g_PFM_W, g_PFM_WD);
306
307#define PFM_END_E(time) \
308do_gettimeofday(&g_now); \
309g_PFM_E += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
310MSG(PERFORMANCE, "%s - Erase PFM: %lu\n", MODULE_NAME, g_PFM_E);
311#else
312#define PFM_BEGIN(time)
313#define PFM_END_R(time, n)
314#define PFM_END_W(time, n)
315#define PFM_END_E(time)
316#endif
317
318#define TIMEOUT_1 0x1fff
319#define TIMEOUT_2 0x8ff
320#define TIMEOUT_3 0xffff
321#define TIMEOUT_4 0xffff //5000 //PIO
322
323#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \
324 do { \
325 DRV_WriteReg(NFI_CMD_REG16,cmd);\
326 while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\
327 DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\
328 DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\
329 DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<<ADDR_ROW_NOB_SHIFT));\
330 while (DRV_Reg32(NFI_STA_REG32) & STA_ADDR_STATE);\
331 }while(0);
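/*
 * NFI_ISSUE_COMMAND: issue one command plus address cycle on the NFI
 * controller. It writes the command opcode, polls until the command state
 * machine is idle, programs the column/row addresses and the number of
 * address bytes, then polls until the address state machine is idle.
 */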
332
333//-------------------------------------------------------------------------------
334static struct completion g_comp_AHB_Done;
335static struct NAND_CMD g_kCMD;
336bool g_bInitDone;
337static int g_i4Interrupt;
338static bool g_bcmdstatus;
339//static bool g_brandstatus;
340static u32 g_value = 0;
341static int g_page_size;
342static int g_block_size;
343static u32 PAGES_PER_BLOCK = 255;
344static bool g_bSyncOrToggle = false;
345static int g_iNFI2X_CLKSRC = ARMPLL;
346//extern unsigned int flash_number;
347//extern flashdev_info_t gen_FlashTable_p[MAX_FLASH];
348extern int part_num;
349
350#if CFG_2CS_NAND
351bool g_b2Die_CS = FALSE; // for nand base
352static bool g_bTricky_CS = FALSE;
353static u32 g_nanddie_pages = 0;
354#endif
355
356#if __INTERNAL_USE_AHB_MODE__
357BOOL g_bHwEcc = true;
358#else
359BOOL g_bHwEcc = false;
360#endif
361#define LPAGE 16384
362#define LSPARE 2048
363
364static u8 *local_buffer_16_align; // 16 byte aligned buffer, for HW issue
365__attribute__((aligned(64))) static u8 local_buffer[LPAGE + LSPARE];
366static u8 *temp_buffer_16_align; // 16 byte aligned buffer, for HW issue
367__attribute__((aligned(64))) static u8 temp_buffer[LPAGE + LSPARE];
368//static u8 *bean_buffer_16_align; // 16 byte aligned buffer, for HW issue
369//__attribute__((aligned(64))) static u8 bean_buffer[LPAGE + LSPARE];
370
371
372extern struct mtd_perf_log g_MtdPerfLog;
373
374extern void nand_release_device(struct mtd_info *mtd);
375extern int nand_get_device(struct mtd_info *mtd, int new_state);
376bool mtk_nand_SetFeature(struct mtd_info *mtd, u16 cmd, u32 addr, u8 *value, u8 bytes);
377bool mtk_nand_GetFeature(struct mtd_info *mtd, u16 cmd, u32 addr, u8 *value, u8 bytes);
378
379#if CFG_2CS_NAND
380static int mtk_nand_cs_check(struct mtd_info *mtd, u8 *id, u16 cs);
381static u32 mtk_nand_cs_on(struct nand_chip *nand_chip, u16 cs, u32 page);
382#endif
383
384
385static bmt_struct *g_bmt;
386struct mtk_nand_host *host;
387static u8 g_running_dma = 0;
388#ifdef DUMP_NATIVE_BACKTRACE
389static u32 g_dump_count = 0;
390#endif
391//extern struct mtd_partition g_pasStatic_Partition[];//to build pass xiaolei
392//int part_num = PART_NUM;//to build pass xiaolei NUM_PARTITIONS;
393#ifdef PMT
394extern void part_init_pmt(struct mtd_info *mtd, u8 * buf);
395extern struct mtd_partition g_exist_Partition[];
396#endif
397int manu_id;
398int dev_id;
399
400static u8 local_oob_buf[LSPARE];
401
402#ifdef _MTK_NAND_DUMMY_DRIVER_
403int dummy_driver_debug;
404#endif
405
406flashdev_info_t devinfo;
407
408enum NAND_TYPE_MASK{
409 TYPE_ASYNC = 0x0,
410 TYPE_TOGGLE = 0x1,
411 TYPE_SYNC = 0x2,
412 TYPE_RESERVED = 0x3,
413 TYPE_MLC = 0x4, // 1b0
414 TYPE_SLC = 0x4, // 1b1
415};
416
417u32 MICRON_TRANSFER(u32 pageNo);
418u32 SANDISK_TRANSFER(u32 pageNo);
419u32 HYNIX_TRANSFER(u32 pageNo);
420u32 hynix_pairpage_mapping(u32 page, bool high_to_low);
421u32 micron_pairpage_mapping(u32 page, bool high_to_low);
422u32 sandisk_pairpage_mapping(u32 page, bool high_to_low);
423
424typedef u32 (*GetLowPageNumber)(u32 pageNo);
425typedef u32 (*TransferPageNumber)(u32 pageNo, bool high_to_low);
426
427GetLowPageNumber functArray[]=
428{
429 MICRON_TRANSFER,
430 HYNIX_TRANSFER,
431 SANDISK_TRANSFER,
432};
433
434TransferPageNumber fsFuncArray[]=
435{
436 micron_pairpage_mapping,
437 hynix_pairpage_mapping,
438 sandisk_pairpage_mapping,
439};
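/*
 * functArray[] and fsFuncArray[] are indexed by devinfo.feature_set.ptbl_idx
 * (see mtk_nand_page_transform() and mtk_nand_paired_page_transfer()), so the
 * entry order here must match the vendor index values used in the flash
 * table: Micron, Hynix, SanDisk.
 */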
440
441u32 SANDISK_TRANSFER(u32 pageNo)
442{
443 if(0 == pageNo)
444 {
445 return pageNo;
446 }
447 else
448 {
449 return pageNo+pageNo-1;
450 }
451}
452
453u32 HYNIX_TRANSFER(u32 pageNo)
454{
455 u32 temp;
456 if(pageNo < 4)
457 return pageNo;
458 temp = pageNo+(pageNo&0xFFFFFFFE)-2;
459 return temp;
460}
461
462
463u32 MICRON_TRANSFER(u32 pageNo)
464{
465 u32 temp;
466 if(pageNo < 4)
467 return pageNo;
468 temp = (pageNo - 4) & 0xFFFFFFFE;
469 if(pageNo<=130)
470 return (pageNo+temp);
471 else
472 return (pageNo+temp-2);
473}
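/*
 * Worked example for the Micron low-page mapping above: pages 0-3 map to
 * themselves, MICRON_TRANSFER(6) = 6 + ((6 - 4) & ~1) = 8, and past page 130
 * the result drops by 2, e.g. MICRON_TRANSFER(131) = 131 + 126 - 2 = 255.
 */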
474
475u32 sandisk_pairpage_mapping(u32 page, bool high_to_low)
476{
477 if(TRUE == high_to_low)
478 {
479 if(page == 255)
480 return page-2;
481 if((page == 0) || (1 == (page%2)))
482 return page;
483 else
484 {
485 if(page == 2)
486 return 0;
487 else
488 return (page-3);
489 }
490 }
491 else
492 {
493 if((page != 0) && (0 == (page%2)))
494 return page;
495 else
496 {
497 if(page == 255)
498 return page;
499 if(page == 0 || page == 253)
500 return page + 2;
501 else
502 return page+3;
503 }
504 }
505}
506
507u32 hynix_pairpage_mapping(u32 page, bool high_to_low)
508{
509 u32 offset;
510 if(TRUE == high_to_low)
511 {
 512 //Hynix 256 pages
513 if(page<4)
514 {
515 return page;
516 }
517
518 offset=page%4;
519 if(offset==2 || offset==3)
520 {
521 return page;
522 }
523 else
524 {
525 if(page == 4 || page == 5 || page == 254 || page == 255)
526 return page-4;
527 else
528 return page-6;
529 }
530 }
531 else
532 {
533 if(page>251)
534 {
535 return page;
536 }
537 if(page == 0 || page == 1)
538 return page+4;
539 offset=page%4;
540 if(offset==0 || offset==1)
541 {
542 return page;
543 }
544 else
545 {
546 return page+6;
547 }
548 }
549}
550
551u32 micron_pairpage_mapping(u32 page, bool high_to_low)
552{
553 u32 offset;
554 if(TRUE == high_to_low)
555 {
556 //Micron 256pages
557 if((page<4)||(page>251))
558 {
559 return page;
560 }
561
562 offset=page%4;
563 if(offset==0 || offset==1)
564 {
565 return page;
566 }
567 else
568 {
569 return page-6;
570 }
571 }
572 else
573 {
574 if((page == 2) || (page == 3) ||(page>247))
575 {
576 return page;
577 }
578 offset=page%4;
579 if(offset==0 || offset==1)
580 {
581 return page+6;
582 }
583 else
584 {
585 return page;
586 }
587 }
588}
589
590int mtk_nand_paired_page_transfer(u32 pageNo, bool high_to_low)
591{
592 if(devinfo.vendor != VEND_NONE)
593 {
594 return fsFuncArray[devinfo.feature_set.ptbl_idx](pageNo,high_to_low);
595 }
596 else
597 {
598 return 0xFFFFFFFF;
599 }
600}
601
602#if 0//#if CFG_FPGA_PLATFORM
603void nand_enable_clock(void)
604{
605
606}
607
608void nand_disable_clock(void)
609{
610
611}
612#else
613#define PWR_DOWN 0
614#define PWR_ON 1
615void nand_enable_clock(void)
616{
617 if(clock_is_on(MT_CG_PERI_NFI)==PWR_DOWN)
618 enable_clock(MT_CG_PERI_NFI, "NFI");
619 if(clock_is_on(MT_CG_PERI_NFI_ECC)==PWR_DOWN)
620 enable_clock(MT_CG_PERI_NFI_ECC, "NFI");
621 if(clock_is_on(MT_CG_PERI_NFIPAD)==PWR_DOWN)
622 enable_clock(MT_CG_PERI_NFIPAD, "NFI");
623}
624
625void nand_disable_clock(void)
626{
627 if(clock_is_on(MT_CG_PERI_NFIPAD)==PWR_ON)
628 disable_clock(MT_CG_PERI_NFIPAD, "NFI");
629 if(clock_is_on(MT_CG_PERI_NFI_ECC)==PWR_ON)
630 disable_clock(MT_CG_PERI_NFI_ECC, "NFI");
631 if(clock_is_on(MT_CG_PERI_NFI)==PWR_ON)
632 disable_clock(MT_CG_PERI_NFI, "NFI");
633}
634#endif
635
636static struct nand_ecclayout nand_oob_16 = {
637 .eccbytes = 8,
638 .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
639 .oobfree = {{1, 6}, {0, 0}}
640};
641
642struct nand_ecclayout nand_oob_64 = {
643 .eccbytes = 32,
644 .eccpos = {32, 33, 34, 35, 36, 37, 38, 39,
645 40, 41, 42, 43, 44, 45, 46, 47,
646 48, 49, 50, 51, 52, 53, 54, 55,
647 56, 57, 58, 59, 60, 61, 62, 63},
648 .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 6}, {0, 0}}
649};
650
651struct nand_ecclayout nand_oob_128 = {
652 .eccbytes = 64,
653 .eccpos = {
654 64, 65, 66, 67, 68, 69, 70, 71,
655 72, 73, 74, 75, 76, 77, 78, 79,
 656 80, 81, 82, 83, 84, 85, 86, 87,
657 88, 89, 90, 91, 92, 93, 94, 95,
658 96, 97, 98, 99, 100, 101, 102, 103,
659 104, 105, 106, 107, 108, 109, 110, 111,
660 112, 113, 114, 115, 116, 117, 118, 119,
661 120, 121, 122, 123, 124, 125, 126, 127},
662 .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 7}, {33, 7}, {41, 7}, {49, 7}, {57, 6}}
663};
664
665/**************************************************************************
666* Randomizer
667**************************************************************************/
668#define SS_SEED_NUM 128
669#define EFUSE_RANDOM_CFG ((volatile u32 *)(0xF02061c0))
670#define EFUSE_RANDOM_ENABLE 0x00000004
671static bool use_randomizer = FALSE;
672static bool pre_randomizer = FALSE;
673
674static U16 SS_RANDOM_SEED[SS_SEED_NUM] =
675{
676 //for page 0~127
677 0x576A, 0x05E8, 0x629D, 0x45A3, 0x649C, 0x4BF0, 0x2342, 0x272E,
678 0x7358, 0x4FF3, 0x73EC, 0x5F70, 0x7A60, 0x1AD8, 0x3472, 0x3612,
679 0x224F, 0x0454, 0x030E, 0x70A5, 0x7809, 0x2521, 0x484F, 0x5A2D,
680 0x492A, 0x043D, 0x7F61, 0x3969, 0x517A, 0x3B42, 0x769D, 0x0647,
681 0x7E2A, 0x1383, 0x49D9, 0x07B8, 0x2578, 0x4EEC, 0x4423, 0x352F,
682 0x5B22, 0x72B9, 0x367B, 0x24B6, 0x7E8E, 0x2318, 0x6BD0, 0x5519,
683 0x1783, 0x18A7, 0x7B6E, 0x7602, 0x4B7F, 0x3648, 0x2C53, 0x6B99,
684 0x0C23, 0x67CF, 0x7E0E, 0x4D8C, 0x5079, 0x209D, 0x244A, 0x747B,
685 0x350B, 0x0E4D, 0x7004, 0x6AC3, 0x7F3E, 0x21F5, 0x7A15, 0x2379,
686 0x1517, 0x1ABA, 0x4E77, 0x15A1, 0x04FA, 0x2D61, 0x253A, 0x1302,
687 0x1F63, 0x5AB3, 0x049A, 0x5AE8, 0x1CD7, 0x4A00, 0x30C8, 0x3247,
688 0x729C, 0x5034, 0x2B0E, 0x57F2, 0x00E4, 0x575B, 0x6192, 0x38F8,
689 0x2F6A, 0x0C14, 0x45FC, 0x41DF, 0x38DA, 0x7AE1, 0x7322, 0x62DF,
690 0x5E39, 0x0E64, 0x6D85, 0x5951, 0x5937, 0x6281, 0x33A1, 0x6A32,
691 0x3A5A, 0x2BAC, 0x743A, 0x5E74, 0x3B2E, 0x7EC7, 0x4FD2, 0x5D28,
692 0x751F, 0x3EF8, 0x39B1, 0x4E49, 0x746B, 0x6EF6, 0x44BE, 0x6DB7
693};
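/*
 * One randomizer seed per page offset: mtk_nand_turn_on_randomizer() below
 * indexes this table with page & (PAGES_PER_BLOCK - 1) when a block has no
 * more than SS_SEED_NUM pages, otherwise with page & (SS_SEED_NUM - 1).
 */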
694
695
696//#if CFG_PERFLOG_DEBUG
697static suseconds_t Cal_timediff(struct timeval *end_time,struct timeval *start_time )
698{
699 struct timeval difference;
700
701 difference.tv_sec =end_time->tv_sec -start_time->tv_sec ;
702 difference.tv_usec=end_time->tv_usec-start_time->tv_usec;
703
704 /* Using while instead of if below makes the code slightly more robust. */
705
706 while(difference.tv_usec<0)
707 {
708 difference.tv_usec+=1000000;
709 difference.tv_sec -=1;
710 }
711
712 return 1000000LL*difference.tv_sec+
713 difference.tv_usec;
714
715} /* timeval_diff() */
716#if CFG_PERFLOG_DEBUG
717
718void dump_nand_rwcount(void)
719{
720 struct timeval now_time;
721 do_gettimeofday(&now_time);
 722 if(Cal_timediff(&now_time,&g_NandLogTimer)>(500*1000)) // Dump every 500 ms
723 {
724 MSG(INIT, " RPageCnt: %d (%lu us) RSubCnt: %d (%lu us) WPageCnt: %d (%lu us) ECnt: %d mtd(0/512/1K/2K/3K/4K): %d %d %d %d %d %d\n ",
725 g_NandPerfLog.ReadPageCount,
726 g_NandPerfLog.ReadPageCount ? (g_NandPerfLog.ReadPageTotalTime/g_NandPerfLog.ReadPageCount): 0,
727 g_NandPerfLog.ReadSubPageCount,
728 g_NandPerfLog.ReadSubPageCount? (g_NandPerfLog.ReadSubPageTotalTime/g_NandPerfLog.ReadSubPageCount): 0,
729 g_NandPerfLog.WritePageCount,
730 g_NandPerfLog.WritePageCount? (g_NandPerfLog.WritePageTotalTime/g_NandPerfLog.WritePageCount): 0,
731 g_NandPerfLog.EraseBlockCount,
732 g_MtdPerfLog.read_size_0_512,
733 g_MtdPerfLog.read_size_512_1K,
734 g_MtdPerfLog.read_size_1K_2K,
735 g_MtdPerfLog.read_size_2K_3K,
736 g_MtdPerfLog.read_size_3K_4K,
737 g_MtdPerfLog.read_size_Above_4K
738 );
739
740 memset(&g_NandPerfLog,0x00,sizeof(g_NandPerfLog));
741 memset(&g_MtdPerfLog,0x00,sizeof(g_MtdPerfLog));
742 do_gettimeofday(&g_NandLogTimer);
743
744 }
745}
746#endif
747void dump_nfi(void)
748{
749#if __DEBUG_NAND
750 printk("~~~~Dump NFI Register in Kernel~~~~\n");
751 printk("NFI_CNFG_REG16: 0x%x\n", DRV_Reg16(NFI_CNFG_REG16));
752 printk("NFI_PAGEFMT_REG16: 0x%x\n", DRV_Reg16(NFI_PAGEFMT_REG16));
753 printk("NFI_CON_REG16: 0x%x\n", DRV_Reg16(NFI_CON_REG16));
754 printk("NFI_ACCCON_REG32: 0x%x\n", DRV_Reg32(NFI_ACCCON_REG32));
755 printk("NFI_INTR_EN_REG16: 0x%x\n", DRV_Reg16(NFI_INTR_EN_REG16));
756 printk("NFI_INTR_REG16: 0x%x\n", DRV_Reg16(NFI_INTR_REG16));
757 printk("NFI_CMD_REG16: 0x%x\n", DRV_Reg16(NFI_CMD_REG16));
758 printk("NFI_ADDRNOB_REG16: 0x%x\n", DRV_Reg16(NFI_ADDRNOB_REG16));
759 printk("NFI_COLADDR_REG32: 0x%x\n", DRV_Reg32(NFI_COLADDR_REG32));
760 printk("NFI_ROWADDR_REG32: 0x%x\n", DRV_Reg32(NFI_ROWADDR_REG32));
761 printk("NFI_STRDATA_REG16: 0x%x\n", DRV_Reg16(NFI_STRDATA_REG16));
762 printk("NFI_DATAW_REG32: 0x%x\n", DRV_Reg32(NFI_DATAW_REG32));
763 printk("NFI_DATAR_REG32: 0x%x\n", DRV_Reg32(NFI_DATAR_REG32));
764 printk("NFI_PIO_DIRDY_REG16: 0x%x\n", DRV_Reg16(NFI_PIO_DIRDY_REG16));
765 printk("NFI_STA_REG32: 0x%x\n", DRV_Reg32(NFI_STA_REG32));
766 printk("NFI_FIFOSTA_REG16: 0x%x\n", DRV_Reg16(NFI_FIFOSTA_REG16));
767// printk("NFI_LOCKSTA_REG16: 0x%x\n", DRV_Reg16(NFI_LOCKSTA_REG16));
768 printk("NFI_ADDRCNTR_REG16: 0x%x\n", DRV_Reg16(NFI_ADDRCNTR_REG16));
769 printk("NFI_STRADDR_REG32: 0x%x\n", DRV_Reg32(NFI_STRADDR_REG32));
770 printk("NFI_BYTELEN_REG16: 0x%x\n", DRV_Reg16(NFI_BYTELEN_REG16));
771 printk("NFI_CSEL_REG16: 0x%x\n", DRV_Reg16(NFI_CSEL_REG16));
772 printk("NFI_IOCON_REG16: 0x%x\n", DRV_Reg16(NFI_IOCON_REG16));
773 printk("NFI_FDM0L_REG32: 0x%x\n", DRV_Reg32(NFI_FDM0L_REG32));
774 printk("NFI_FDM0M_REG32: 0x%x\n", DRV_Reg32(NFI_FDM0M_REG32));
775 printk("NFI_LOCK_REG16: 0x%x\n", DRV_Reg16(NFI_LOCK_REG16));
776 printk("NFI_LOCKCON_REG32: 0x%x\n", DRV_Reg32(NFI_LOCKCON_REG32));
777 printk("NFI_LOCKANOB_REG16: 0x%x\n", DRV_Reg16(NFI_LOCKANOB_REG16));
778 printk("NFI_FIFODATA0_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA0_REG32));
779 printk("NFI_FIFODATA1_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA1_REG32));
780 printk("NFI_FIFODATA2_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA2_REG32));
781 printk("NFI_FIFODATA3_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA3_REG32));
782 printk("NFI_MASTERSTA_REG16: 0x%x\n", DRV_Reg16(NFI_MASTERSTA_REG16));
783 printk("NFI_DEBUG_CON1_REG16: 0x%x\n", DRV_Reg16(NFI_DEBUG_CON1_REG16));
784 printk("ECC_ENCCON_REG16 :%x\n",*ECC_ENCCON_REG16 );
785 printk("ECC_ENCCNFG_REG32 :%x\n",*ECC_ENCCNFG_REG32 );
786 printk("ECC_ENCDIADDR_REG32 :%x\n",*ECC_ENCDIADDR_REG32 );
787 printk("ECC_ENCIDLE_REG32 :%x\n",*ECC_ENCIDLE_REG32 );
788 printk("ECC_ENCPAR0_REG32 :%x\n",*ECC_ENCPAR0_REG32 );
789 printk("ECC_ENCPAR1_REG32 :%x\n",*ECC_ENCPAR1_REG32 );
790 printk("ECC_ENCPAR2_REG32 :%x\n",*ECC_ENCPAR2_REG32 );
791 printk("ECC_ENCPAR3_REG32 :%x\n",*ECC_ENCPAR3_REG32 );
792 printk("ECC_ENCPAR4_REG32 :%x\n",*ECC_ENCPAR4_REG32 );
793 printk("ECC_ENCPAR5_REG32 :%x\n",*ECC_ENCPAR5_REG32 );
794 printk("ECC_ENCPAR6_REG32 :%x\n",*ECC_ENCPAR6_REG32 );
795 printk("ECC_ENCSTA_REG32 :%x\n",*ECC_ENCSTA_REG32 );
796 printk("ECC_ENCIRQEN_REG16 :%x\n",*ECC_ENCIRQEN_REG16 );
797 printk("ECC_ENCIRQSTA_REG16 :%x\n",*ECC_ENCIRQSTA_REG16 );
798 printk("ECC_DECCON_REG16 :%x\n",*ECC_DECCON_REG16 );
799 printk("ECC_DECCNFG_REG32 :%x\n",*ECC_DECCNFG_REG32 );
800 printk("ECC_DECDIADDR_REG32 :%x\n",*ECC_DECDIADDR_REG32 );
801 printk("ECC_DECIDLE_REG16 :%x\n",*ECC_DECIDLE_REG16 );
802 printk("ECC_DECFER_REG16 :%x\n",*ECC_DECFER_REG16 );
803 printk("ECC_DECENUM0_REG32 :%x\n",*ECC_DECENUM0_REG32 );
804 printk("ECC_DECENUM1_REG32 :%x\n",*ECC_DECENUM1_REG32 );
805 printk("ECC_DECDONE_REG16 :%x\n",*ECC_DECDONE_REG16 );
806 printk("ECC_DECEL0_REG32 :%x\n",*ECC_DECEL0_REG32 );
807 printk("ECC_DECEL1_REG32 :%x\n",*ECC_DECEL1_REG32 );
808 printk("ECC_DECEL2_REG32 :%x\n",*ECC_DECEL2_REG32 );
809 printk("ECC_DECEL3_REG32 :%x\n",*ECC_DECEL3_REG32 );
810 printk("ECC_DECEL4_REG32 :%x\n",*ECC_DECEL4_REG32 );
811 printk("ECC_DECEL5_REG32 :%x\n",*ECC_DECEL5_REG32 );
812 printk("ECC_DECEL6_REG32 :%x\n",*ECC_DECEL6_REG32 );
813 printk("ECC_DECEL7_REG32 :%x\n",*ECC_DECEL7_REG32 );
814 printk("ECC_DECIRQEN_REG16 :%x\n",*ECC_DECIRQEN_REG16 );
815 printk("ECC_DECIRQSTA_REG16 :%x\n",*ECC_DECIRQSTA_REG16 );
816 printk("ECC_DECFSM_REG32 :%x\n",*ECC_DECFSM_REG32 );
817 printk("ECC_BYPASS_REG32 :%x\n",*ECC_BYPASS_REG32 );
818 printk("NFI clock : %s\n", (DRV_Reg32((volatile u32 *)(PERICFG_BASE+0x18)) & (0x1)) ? "Clock Disabled" : "Clock Enabled");
819 printk("NFI clock SEL (MT8127):0x%x: %s\n",(PERICFG_BASE+0x5C), (DRV_Reg32((volatile u32 *)(PERICFG_BASE+0x5C)) & (0x1)) ? "Half clock" : "Quarter clock");
820#endif
821}
822
823u8 NFI_DMA_status(void)
824{
825 return g_running_dma;
826}
827
828u32 NFI_DMA_address(void)
829{
830 return DRV_Reg32(NFI_STRADDR_REG32);
831}
832
833EXPORT_SYMBOL(NFI_DMA_status);
834EXPORT_SYMBOL(NFI_DMA_address);
835
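/*
 * Translate a virtual address into a physical address for NFI DMA. Kernel
 * lowmem addresses go through __virt_to_phys(); anything else is resolved by
 * walking the current task's page tables (pgd/pmd/pte). Returns 0 when the
 * address cannot be resolved.
 */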
836u32 nand_virt_to_phys_add(u32 va)
837{
838 u32 pageOffset = (va & (PAGE_SIZE - 1));
839 pgd_t *pgd;
840 pmd_t *pmd;
841 pte_t *pte;
842 u32 pa;
843
844 if (virt_addr_valid(va))
845 {
846 return __virt_to_phys(va);
847 }
848
849 if (NULL == current)
850 {
851 printk(KERN_ERR "[nand_virt_to_phys_add] ERROR ,current is NULL! \n");
852 return 0;
853 }
854
855 if (NULL == current->mm)
856 {
857 printk(KERN_ERR "[nand_virt_to_phys_add] ERROR current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
858 return 0;
859 }
860
861 pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
862 if (pgd_none(*pgd) || pgd_bad(*pgd))
863 {
864 printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pgd invalid! \n", va);
865 return 0;
866 }
867
868 pmd = pmd_offset((pud_t *)pgd, va);
869 if (pmd_none(*pmd) || pmd_bad(*pmd))
870 {
871 printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pmd invalid! \n", va);
872 return 0;
873 }
874
875 pte = pte_offset_map(pmd, va);
876 if (pte_present(*pte))
877 {
878 pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset;
879 return pa;
880 }
881
882 printk(KERN_ERR "[nand_virt_to_phys_add] ERROR va=0x%x, pte invalid! \n", va);
883 return 0;
884}
885
886EXPORT_SYMBOL(nand_virt_to_phys_add);
887
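/*
 * Match the READ ID bytes against the generated flash table and, among the
 * matching entries, pick the one with the longest ID. On success the matched
 * entry is copied into *devinfo; otherwise the raw ID bytes are logged and
 * false is returned.
 */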
888bool get_device_info(u8*id, flashdev_info_t *devinfo)
889{
890 u32 i,m,n,mismatch;
891 int target=-1;
892 u8 target_id_len=0;
893 for (i = 0; i<flash_number; i++){
894 mismatch=0;
895 for(m=0;m<gen_FlashTable_p[i].id_length;m++){
896 if(id[m]!=gen_FlashTable_p[i].id[m]){
897 mismatch=1;
898 break;
899 }
900 }
901 if(mismatch == 0 && gen_FlashTable_p[i].id_length > target_id_len){
902 target=i;
903 target_id_len=gen_FlashTable_p[i].id_length;
904 }
905 }
906
907 if(target != -1){
908 MSG(INIT, "Recognize NAND: ID [");
909 for(n=0;n<gen_FlashTable_p[target].id_length;n++){
910 devinfo->id[n] = gen_FlashTable_p[target].id[n];
911 MSG(INIT, "%x ",devinfo->id[n]);
912 }
913 MSG(INIT, "], Device Name [%s], Page Size [%d]B Spare Size [%d]B Total Size [%d]MB\n",gen_FlashTable_p[target].devciename,gen_FlashTable_p[target].pagesize,gen_FlashTable_p[target].sparesize,gen_FlashTable_p[target].totalsize);
914 devinfo->id_length=gen_FlashTable_p[target].id_length;
915 devinfo->blocksize = gen_FlashTable_p[target].blocksize;
916 devinfo->addr_cycle = gen_FlashTable_p[target].addr_cycle;
917 devinfo->iowidth = gen_FlashTable_p[target].iowidth;
918 devinfo->timmingsetting = gen_FlashTable_p[target].timmingsetting;
919 devinfo->advancedmode = gen_FlashTable_p[target].advancedmode;
920 devinfo->pagesize = gen_FlashTable_p[target].pagesize;
921 devinfo->sparesize = gen_FlashTable_p[target].sparesize;
922 devinfo->totalsize = gen_FlashTable_p[target].totalsize;
923 devinfo->sectorsize = gen_FlashTable_p[target].sectorsize;
924 devinfo->s_acccon= gen_FlashTable_p[target].s_acccon;
925 devinfo->s_acccon1= gen_FlashTable_p[target].s_acccon1;
926 devinfo->freq= gen_FlashTable_p[target].freq;
927 devinfo->vendor = gen_FlashTable_p[target].vendor;
928 //devinfo->ttarget = gen_FlashTable[target].ttarget;
929 memcpy((u8*)&devinfo->feature_set, (u8*)&gen_FlashTable_p[target].feature_set, sizeof(struct MLC_feature_set));
930 memcpy(devinfo->devciename, gen_FlashTable_p[target].devciename, sizeof(devinfo->devciename));
931 return true;
932 }else{
933 MSG(INIT, "Not Found NAND: ID [");
934 for(n=0;n<NAND_MAX_ID;n++){
935 MSG(INIT, "%x ",id[n]);
936 }
937 MSG(INIT, "]\n");
938 return false;
939 }
940}
941#ifdef DUMP_NATIVE_BACKTRACE
942#define NFI_NATIVE_LOG_SD "/sdcard/NFI_native_log_%s-%02d-%02d-%02d_%02d-%02d-%02d.log"
943#define NFI_NATIVE_LOG_DATA "/data/NFI_native_log_%s-%02d-%02d-%02d_%02d-%02d-%02d.log"
944static int nfi_flush_log(char *s)
945{
946 mm_segment_t old_fs;
947 struct rtc_time tm;
948 struct timeval tv = { 0 };
949 struct file *filp = NULL;
950 char name[256];
951 unsigned int re = 0;
952 int data_write = 0;
953
954 do_gettimeofday(&tv);
955 rtc_time_to_tm(tv.tv_sec, &tm);
956 memset(name, 0, sizeof(name));
957 sprintf(name, NFI_NATIVE_LOG_DATA, s, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
958
959 old_fs = get_fs();
960 set_fs(KERNEL_DS);
961 filp = filp_open(name, O_WRONLY | O_CREAT, 0777);
962 if (IS_ERR(filp))
963 {
964 printk("[NFI_flush_log]error create file in %s, IS_ERR:%ld, PTR_ERR:%ld\n", name, IS_ERR(filp), PTR_ERR(filp));
965 memset(name, 0, sizeof(name));
966 sprintf(name, NFI_NATIVE_LOG_SD, s, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
967 filp = filp_open(name, O_WRONLY | O_CREAT, 0777);
968 if (IS_ERR(filp))
969 {
970 printk("[NFI_flush_log]error create file in %s, IS_ERR:%ld, PTR_ERR:%ld\n", name, IS_ERR(filp), PTR_ERR(filp));
971 set_fs(old_fs);
972 return -1;
973 }
974 }
975 printk("[NFI_flush_log]log file:%s\n", name);
976 set_fs(old_fs);
977
978 if (!(filp->f_op) || !(filp->f_op->write))
979 {
980 printk("[NFI_flush_log] No operation\n");
981 re = -1;
982 goto ClOSE_FILE;
983 }
984
985 DumpNativeInfo();
986 old_fs = get_fs();
987 set_fs(KERNEL_DS);
988 data_write = vfs_write(filp, (char __user *)NativeInfo, strlen(NativeInfo), &filp->f_pos);
989 if (!data_write)
990 {
991 printk("[nfi_flush_log] write fail\n");
992 re = -1;
993 }
994 set_fs(old_fs);
995
996 ClOSE_FILE:
997 if (filp)
998 {
999 filp_close(filp, current->files);
1000 filp = NULL;
1001 }
1002 return re;
1003}
1004#endif
1005//extern bool MLC_DEVICE;
1006static bool mtk_nand_reset(void);
1007extern u64 part_get_startaddress(u64 byte_address,u32* idx);
1008extern bool raw_partition(u32 index);
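/*
 * Translate a logical page number into (block, mapped block, page-in-block).
 * For raw partitions on MLC parts only the low pages of a block are used, so
 * the block index advances at twice the logical rate, the page offset is
 * taken modulo half a block, and the result goes through the vendor low-page
 * table (functArray). All other cases use the plain linear mapping. In both
 * paths the block is remapped through the BMT via get_mapping_block_index().
 */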
1009u32 mtk_nand_page_transform(struct mtd_info *mtd, struct nand_chip *chip, u32 page, u32* blk, u32* map_blk)
1010{
1011 u32 block_size = 1 <<(chip->phys_erase_shift);
1012 u32 page_size = (1<<chip->page_shift);
1013 loff_t start_address;
1014 u32 idx;
1015 u32 block;
1016 u32 page_in_block;
1017 u32 mapped_block;
1018 bool translate = FALSE;
1019 loff_t logical_address = (loff_t)page*(1<<chip->page_shift);
1020 //MSG(INIT , "[BEAN]%d, %x\n",page,logical_address);
1021 if(MLC_DEVICE)
1022 {
1023 start_address = part_get_startaddress(logical_address,&idx);
1024 //MSG(INIT , "[start_address]page = 0x%x, start_address=0x%lx\n",page,start_address);
1025 if(raw_partition(idx))
1026 translate = TRUE;
1027 else
1028 translate = FALSE;
1029 }
1030 if(translate == TRUE)
1031 {
1032 block = (u32)((u32)(start_address >> chip->phys_erase_shift) + (u32)((logical_address-start_address) >> (chip->phys_erase_shift-1)));
1033 page_in_block = ((u32)((logical_address-start_address) >> chip->page_shift) % ((mtd->erasesize/page_size)/2));
1034 //MSG(INIT , "[LOW]0x%x, 0x%x\n",block,page_in_block);
1035
1036 if(devinfo.vendor != VEND_NONE)
1037 {
1038 //page_in_block = devinfo.feature_set.PairPage[page_in_block];
1039 page_in_block = functArray[devinfo.feature_set.ptbl_idx](page_in_block);
1040 }
1041
1042 mapped_block = get_mapping_block_index(block);
1043
1044 //MSG(INIT , "[page_in_block]mapped_block=%d, page_in_block=%d\n",mapped_block,page_in_block);
1045 *blk = block;
1046 *map_blk = mapped_block;
1047 return page_in_block;
1048 }
1049 else
1050 {
1051 block = page/(block_size/page_size);
1052 mapped_block = get_mapping_block_index(block);
1053 page_in_block = page % (block_size/page_size);
1054 //MSG(INIT , "[FULL]0x%x, 0x%x 0x%x 0x%x\n",block,page_in_block,mapped_block, page_in_block+mapped_block*(block_size/page_size));
1055 *blk = block;
1056 *map_blk = mapped_block;
1057 return page_in_block;
1058 }
1059}
1060
1061bool mtk_nand_IsRawPartition(loff_t logical_address)
1062{
1063 u32 idx;
1064 part_get_startaddress(logical_address,&idx);
1065 if(raw_partition(idx))
1066 {
1067 return true;
1068 }
1069 else
1070 {
1071 return false;
1072 }
1073}
1074
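/*
 * Switch to the high-speed interface (ONFI sync or Toggle) when the flash
 * table asks for it: reset the device, issue SET FEATURES for the timing
 * mode, reprogram the NFI clock mux and AC timing, then read the feature back
 * to confirm. If the read-back does not match, the legacy asynchronous
 * settings are restored and 0 is returned.
 */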
1075static int mtk_nand_interface_config(struct mtd_info *mtd)
1076{
1077 u32 timeout;
1078 u32 val;
1079 u32 acccon1;
1080 struct gFeatureSet *feature_set = &(devinfo.feature_set.FeatureSet);
1081 //int clksrc = ARMPLL;
1082 if(devinfo.iowidth == IO_ONFI || devinfo.iowidth ==IO_TOGGLEDDR || devinfo.iowidth ==IO_TOGGLESDR)
1083 {
1084 nand_enable_clock();
1085 //0:26M 1:182M 2:156M 3:124.8M 4:91M 5:62.4M 6:39M 7:26M
1086 if(devinfo.freq == 80) // mode 4
1087 {
1088 g_iNFI2X_CLKSRC = MSDCPLL; // 156M
1089 }else if(devinfo.freq == 100) // mode 5
1090 {
1091 g_iNFI2X_CLKSRC = MAINPLL; //182M
1092 }
1093//reset
1094 //printk("[Bean]mode:%d\n", g_iNFI2X_CLKSRC);
1095 NFI_ISSUE_COMMAND (NAND_CMD_RESET, 0, 0, 0, 0);
1096 timeout = TIMEOUT_4;
1097 while (timeout)
1098 timeout--;
1099 mtk_nand_reset();
1100//set feature
1101 //printk("[Interface Config]cmd:0x%X addr:0x%x feature:0x%x\n",
1102 //feature_set->sfeatureCmd, feature_set->Interface.address, feature_set->Interface.feature);
1103
1104 //mtk_nand_GetFeature(mtd, feature_set->gfeatureCmd, \
1105 //feature_set->Interface.address, &val,4);
1106 //printk("[Interface]0x%X\n", val);
1107 mtk_nand_SetFeature(mtd, (u16) feature_set->sfeatureCmd, \
1108 feature_set->Interface.address, (u8 *)&feature_set->Interface.feature,\
1109 sizeof(feature_set->Interface.feature));
1110 mb();
1111 NFI_CLN_REG32(NFI_DEBUG_CON1_REG16,HWDCM_SWCON_ON);
1112
1113//setup register
1114 mb();
1115 NFI_CLN_REG32(NFI_DEBUG_CON1_REG16,NFI_BYPASS);
1116 //clear bypass of ecc
1117 mb();
1118 NFI_CLN_REG32(ECC_BYPASS_REG32,ECC_BYPASS);
1119 mb();
1120 DRV_WriteReg32(PERICFG_BASE+0x5C, 0x0); // setting default AHB clock
1121 //MSG(INIT, "AHB Clock(0x%x)\n",DRV_Reg32(PERICFG_BASE+0x5C));
1122 mb();
1123 NFI_SET_REG32(PERI_NFI_CLK_SOURCE_SEL, NFI_PAD_1X_CLOCK);
1124 mb();
1125 clkmux_sel(MT_MUX_NFI2X,g_iNFI2X_CLKSRC,"NFI");
1126 mb();
1127 DRV_WriteReg32(NFI_DLYCTRL_REG32, 0x4001);
1128 DRV_WriteReg32(PERI_NFI_MAC_CTRL, 0x10006);
 1129 while(0 == (DRV_Reg32(NFI_STA_REG32) & STA_FLASH_MACRO_IDLE)); /* wait until the flash macro is idle */
1130 if(devinfo.iowidth == IO_ONFI)
1131 DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32, 2); //ONFI
1132 else
1133 DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32, 1); //Toggle
1134 //printk("[Timing]0x%x 0x%x\n", devinfo.s_acccon, devinfo.s_acccon1);
1135 acccon1 = DRV_Reg32(NFI_ACCCON1_REG3);
1136 DRV_WriteReg32(NFI_ACCCON1_REG3,devinfo.s_acccon1);
1137 DRV_WriteReg32(NFI_ACCCON_REG32,devinfo.s_acccon);
1138//read back confirm
1139 mtk_nand_GetFeature(mtd, feature_set->gfeatureCmd, \
1140 feature_set->Interface.address, (u8 *)&val,4);
1141 //printk("[Bean]feature is %x\n", val);
1142 if((val&0xFF) != (feature_set->Interface.feature & 0xFF))
1143 {
1144 MSG(INIT, "[%s] fail 0x%X\n",__FUNCTION__,val);
1145 NFI_ISSUE_COMMAND (NAND_CMD_RESET, 0, 0, 0, 0); //ASYNC
1146 timeout = TIMEOUT_4;
1147 while (timeout)
1148 timeout--;
1149 mtk_nand_reset();
1150 clkmux_sel(MT_MUX_NFI2X, MAINPLL, "NFI"); // 182M
1151 NFI_SET_REG32(NFI_DEBUG_CON1_REG16,NFI_BYPASS);
1152 NFI_SET_REG32(ECC_BYPASS_REG32,ECC_BYPASS);
1153 NFI_CLN_REG32(PERI_NFI_CLK_SOURCE_SEL, NFI_PAD_1X_CLOCK);
1154 DRV_WriteReg32(PERICFG_BASE+0x5C, 0x1); // setting AHB clock
1155 //MSG(INIT, "AHB Clock(0x%x)\n",DRV_Reg32(PERICFG_BASE+0x5C));
1156 DRV_WriteReg32(NFI_ACCCON1_REG3,acccon1);
1157 DRV_WriteReg32(NFI_ACCCON_REG32,devinfo.timmingsetting);
1158 DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32, 0); //Legacy
1159 g_bSyncOrToggle = false;
1160 return 0;
1161 }
1162 g_bSyncOrToggle = true;
1163
1164 MSG(INIT, "[%s] success 0x%X\n",__FUNCTION__, devinfo.iowidth);
1165 //extern void log_boot(char *str);
1166 //log_boot("[Bean]sync mode success!");
1167 }
1168 else
1169 {
1170 g_bSyncOrToggle = false;
1171 MSG(INIT, "[%s] legacy interface \n",__FUNCTION__);
1172 return 0;
1173 }
1174
1175 return 1;
1176}
1177
1178#if CFG_RANDOMIZER
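/*
 * Enable the HW randomizer for one page: program the empty-page threshold,
 * clear the extra seed registers, load the per-page seed from
 * SS_RANDOM_SEED[] and choose whether the seed is reloaded per page (fgPage
 * set) or per sector. "type" selects the encode (write) or decode (read) path.
 */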
1179static int mtk_nand_turn_on_randomizer(u32 page, int type, int fgPage)
1180{
1181 u32 u4NFI_CFG = 0;
1182 u32 u4NFI_RAN_CFG = 0;
1183 u4NFI_CFG = DRV_Reg32(NFI_CNFG_REG16);
1184
1185 DRV_WriteReg32(NFI_ENMPTY_THRESH_REG32, 40); // empty threshold 40
1186
1187 if (type) //encode
1188 {
1189 DRV_WriteReg32(NFI_RANDOM_ENSEED01_TS_REG32, 0);
1190 DRV_WriteReg32(NFI_RANDOM_ENSEED02_TS_REG32, 0);
1191 DRV_WriteReg32(NFI_RANDOM_ENSEED03_TS_REG32, 0);
1192 DRV_WriteReg32(NFI_RANDOM_ENSEED04_TS_REG32, 0);
1193 DRV_WriteReg32(NFI_RANDOM_ENSEED05_TS_REG32, 0);
1194 DRV_WriteReg32(NFI_RANDOM_ENSEED06_TS_REG32, 0);
1195 }
1196 else
1197 {
1198 DRV_WriteReg32(NFI_RANDOM_DESEED01_TS_REG32, 0);
1199 DRV_WriteReg32(NFI_RANDOM_DESEED02_TS_REG32, 0);
1200 DRV_WriteReg32(NFI_RANDOM_DESEED03_TS_REG32, 0);
1201 DRV_WriteReg32(NFI_RANDOM_DESEED04_TS_REG32, 0);
1202 DRV_WriteReg32(NFI_RANDOM_DESEED05_TS_REG32, 0);
1203 DRV_WriteReg32(NFI_RANDOM_DESEED06_TS_REG32, 0);
1204 }
1205 u4NFI_CFG |= CNFG_RAN_SEL;
1206 if(PAGES_PER_BLOCK <= SS_SEED_NUM)
1207 {
1208 if (type)
1209 {
1210 u4NFI_RAN_CFG |= RAN_CNFG_ENCODE_SEED(SS_RANDOM_SEED[page & (PAGES_PER_BLOCK-1)]) | RAN_CNFG_ENCODE_EN;
1211 }
1212 else
1213 {
1214 u4NFI_RAN_CFG |= RAN_CNFG_DECODE_SEED(SS_RANDOM_SEED[page & (PAGES_PER_BLOCK-1)]) | RAN_CNFG_DECODE_EN;
1215 }
1216 }
1217 else
1218 {
1219 if (type)
1220 {
1221 u4NFI_RAN_CFG |= RAN_CNFG_ENCODE_SEED(SS_RANDOM_SEED[page & (SS_SEED_NUM-1)]) | RAN_CNFG_ENCODE_EN;
1222 }
1223 else
1224 {
1225 u4NFI_RAN_CFG |= RAN_CNFG_DECODE_SEED(SS_RANDOM_SEED[page & (SS_SEED_NUM-1)]) | RAN_CNFG_DECODE_EN;
1226 }
1227 }
1228
1229
1230 if(fgPage) //reload seed for each page
1231 u4NFI_CFG &= ~CNFG_RAN_SEC;
1232 else //reload seed for each sector
1233 u4NFI_CFG |= CNFG_RAN_SEC;
1234
1235 DRV_WriteReg32(NFI_CNFG_REG16, u4NFI_CFG);
1236 DRV_WriteReg32(NFI_RANDOM_CNFG_REG32, u4NFI_RAN_CFG);
1237 //MSG(INIT, "[K]ran turn on type:%d 0x%x 0x%x\n", type, DRV_Reg32(NFI_RANDOM_CNFG_REG32), page);
1238 return 0;
1239}
1240
1241static bool mtk_nand_israndomizeron(void)
1242{
1243 u32 nfi_ran_cnfg = 0;
1244 nfi_ran_cnfg = DRV_Reg32(NFI_RANDOM_CNFG_REG32);
1245 if(nfi_ran_cnfg&(RAN_CNFG_ENCODE_EN | RAN_CNFG_DECODE_EN))
1246 return TRUE;
1247
1248 return FALSE;
1249}
1250
1251static void mtk_nand_turn_off_randomizer(void)
1252{
1253 u32 u4NFI_CFG = DRV_Reg32(NFI_CNFG_REG16);
1254 u4NFI_CFG &= ~CNFG_RAN_SEL;
1255 u4NFI_CFG &= ~CNFG_RAN_SEC;
1256 DRV_WriteReg32(NFI_RANDOM_CNFG_REG32, 0);
1257 DRV_WriteReg32(NFI_CNFG_REG16, u4NFI_CFG);
1258 //MSG(INIT, "[K]ran turn off\n");
1259}
1260#else
1261#define mtk_nand_israndomizeron() (FALSE)
1262#define mtk_nand_turn_on_randomizer(page, type, fgPage)
1263#define mtk_nand_turn_off_randomizer()
1264#endif
1265
1266
1267/******************************************************************************
1268 * mtk_nand_irq_handler
1269 *
1270 * DESCRIPTION:
1271 * NAND interrupt handler!
1272 *
1273 * PARAMETERS:
1274 * int irq
1275 * void *dev_id
1276 *
1277 * RETURNS:
1278 * IRQ_HANDLED : Successfully handle the IRQ
1279 *
1280 * NOTES:
1281 * None
1282 *
1283 ******************************************************************************/
1284/* Modified for TCM used */
1285static irqreturn_t mtk_nand_irq_handler(int irqno, void *dev_id)
1286{
1287 u16 u16IntStatus = DRV_Reg16(NFI_INTR_REG16);
1288 (void)irqno;
1289
1290 if (u16IntStatus & (u16) INTR_AHB_DONE_EN)
1291 {
1292 complete(&g_comp_AHB_Done);
1293 }
1294 return IRQ_HANDLED;
1295}
1296
1297/******************************************************************************
1298 * ECC_Config
1299 *
1300 * DESCRIPTION:
1301 * Configure HW ECC!
1302 *
1303 * PARAMETERS:
1304 * struct mtk_nand_host_hw *hw
1305 *
1306 * RETURNS:
1307 * None
1308 *
1309 * NOTES:
1310 * None
1311 *
1312 ******************************************************************************/
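/*
 * The sizes programmed below are in bits: the encoder message length is
 * (sector data + 8 FDM bytes) * 8, and the decoder code length additionally
 * covers the ecc_bit * ECC_PARITY_BIT parity bits of each sector.
 */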
1313static void ECC_Config(struct mtk_nand_host_hw *hw,u32 ecc_bit)
1314{
1315 u32 u4ENCODESize;
1316 u32 u4DECODESize;
1317 u32 ecc_bit_cfg = ECC_CNFG_ECC4;
1318
1319 switch(ecc_bit){
1320#ifndef MTK_COMBO_NAND_SUPPORT
1321 case 4:
1322 ecc_bit_cfg = ECC_CNFG_ECC4;
1323 break;
1324 case 8:
1325 ecc_bit_cfg = ECC_CNFG_ECC8;
1326 break;
1327 case 10:
1328 ecc_bit_cfg = ECC_CNFG_ECC10;
1329 break;
1330 case 12:
1331 ecc_bit_cfg = ECC_CNFG_ECC12;
1332 break;
1333 case 14:
1334 ecc_bit_cfg = ECC_CNFG_ECC14;
1335 break;
1336 case 16:
1337 ecc_bit_cfg = ECC_CNFG_ECC16;
1338 break;
1339 case 18:
1340 ecc_bit_cfg = ECC_CNFG_ECC18;
1341 break;
1342 case 20:
1343 ecc_bit_cfg = ECC_CNFG_ECC20;
1344 break;
1345 case 22:
1346 ecc_bit_cfg = ECC_CNFG_ECC22;
1347 break;
1348 case 24:
1349 ecc_bit_cfg = ECC_CNFG_ECC24;
1350 break;
1351 #endif
1352 case 28:
1353 ecc_bit_cfg = ECC_CNFG_ECC28;
1354 break;
1355 case 32:
1356 ecc_bit_cfg = ECC_CNFG_ECC32;
1357 break;
1358 case 36:
1359 ecc_bit_cfg = ECC_CNFG_ECC36;
1360 break;
1361 case 40:
1362 ecc_bit_cfg = ECC_CNFG_ECC40;
1363 break;
1364 case 44:
1365 ecc_bit_cfg = ECC_CNFG_ECC44;
1366 break;
1367 case 48:
1368 ecc_bit_cfg = ECC_CNFG_ECC48;
1369 break;
1370 case 52:
1371 ecc_bit_cfg = ECC_CNFG_ECC52;
1372 break;
1373 case 56:
1374 ecc_bit_cfg = ECC_CNFG_ECC56;
1375 break;
1376 case 60:
1377 ecc_bit_cfg = ECC_CNFG_ECC60;
1378 break;
1379 default:
1380 break;
1381
1382 }
1383 DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
1384 do
1385 {;
1386 }
1387 while (!DRV_Reg16(ECC_DECIDLE_REG16));
1388
1389 DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
1390 do
1391 {;
1392 }
1393 while (!DRV_Reg32(ECC_ENCIDLE_REG32));
1394
1395 /* setup FDM register base */
1396// DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32);
1397
1398 /* Sector + FDM */
1399 u4ENCODESize = (hw->nand_sec_size + 8) << 3;
1400 /* Sector + FDM + YAFFS2 meta data bits */
1401 u4DECODESize = ((hw->nand_sec_size + 8) << 3) + ecc_bit * ECC_PARITY_BIT;
1402
1403 /* configure ECC decoder && encoder */
1404 DRV_WriteReg32(ECC_DECCNFG_REG32, ecc_bit_cfg | DEC_CNFG_NFI | DEC_CNFG_EMPTY_EN | (u4DECODESize << DEC_CNFG_CODE_SHIFT));
1405
1406 DRV_WriteReg32(ECC_ENCCNFG_REG32, ecc_bit_cfg | ENC_CNFG_NFI | (u4ENCODESize << ENC_CNFG_MSG_SHIFT));
1407#ifndef MANUAL_CORRECT
1408 NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_CORRECT);
1409#else
1410 NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL);
1411#endif
1412}
1413
1414/******************************************************************************
1415 * ECC_Decode_Start
1416 *
1417 * DESCRIPTION:
1418 * HW ECC Decode Start !
1419 *
1420 * PARAMETERS:
1421 * None
1422 *
1423 * RETURNS:
1424 * None
1425 *
1426 * NOTES:
1427 * None
1428 *
1429 ******************************************************************************/
1430static void ECC_Decode_Start(void)
1431{
1432 /* wait for device returning idle */
1433 while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE)) ;
1434 DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN);
1435}
1436
1437/******************************************************************************
1438 * ECC_Decode_End
1439 *
1440 * DESCRIPTION:
1441 * HW ECC Decode End !
1442 *
1443 * PARAMETERS:
1444 * None
1445 *
1446 * RETURNS:
1447 * None
1448 *
1449 * NOTES:
1450 * None
1451 *
1452 ******************************************************************************/
1453static void ECC_Decode_End(void)
1454{
1455 /* wait for device returning idle */
1456 while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE)) ;
1457 DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
1458}
1459
1460/******************************************************************************
1461 * ECC_Encode_Start
1462 *
1463 * DESCRIPTION:
1464 * HW ECC Encode Start !
1465 *
1466 * PARAMETERS:
1467 * None
1468 *
1469 * RETURNS:
1470 * None
1471 *
1472 * NOTES:
1473 * None
1474 *
1475 ******************************************************************************/
1476static void ECC_Encode_Start(void)
1477{
1478 /* wait for device returning idle */
1479 while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ;
1480 mb();
1481 DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN);
1482}
1483
1484/******************************************************************************
1485 * ECC_Encode_End
1486 *
1487 * DESCRIPTION:
1488 * HW ECC Encode End !
1489 *
1490 * PARAMETERS:
1491 * None
1492 *
1493 * RETURNS:
1494 * None
1495 *
1496 * NOTES:
1497 * None
1498 *
1499 ******************************************************************************/
1500static void ECC_Encode_End(void)
1501{
1502 /* wait for device returning idle */
1503 while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ;
1504 mb();
1505 DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
1506}
1507#if 0
1508static bool is_empty_page(u8 * spare_buf, u32 sec_num){
1509 u32 i=0;
1510 bool is_empty=true;
1511#if 0
1512 for(i=0;i<sec_num*8;i++){
1513 if(spare_buf[i]!=0xFF){
1514 is_empty=false;
1515 break;
1516 }
1517 }
1518 printk("\n");
1519#else
1520 for(i=0;i<OOB_INDEX_SIZE;i++){
1521 //xlog_printk(ANDROID_LOG_INFO,"NFI", "flag byte: %x ",spare_buf[OOB_INDEX_OFFSET+i] );
1522 switch(g_page_size)
1523 {
1524 case 2048:
1525 if(spare_buf[13+i] !=0xFF){
1526 is_empty=false;
1527 //break;
1528 }
1529 break;
1530 default:
1531 if(spare_buf[OOB_INDEX_OFFSET+i] !=0xFF){
1532 is_empty=false;
1533 //break;
1534 }
1535 }
1536 if(!is_empty)
1537 break;
1538 }
1539#endif
1540 xlog_printk(ANDROID_LOG_INFO,"NFI", "This page is %s!\n",is_empty?"empty":"occupied");
1541 return is_empty;
1542}
1543static bool return_fake_buf(u8 * data_buf, u32 page_size, u32 sec_num,u32 u4PageAddr){
1544 u32 i=0,j=0;
1545 u32 sec_zero_count=0;
1546 u8 t=0;
1547 u8 *p=data_buf;
1548 bool ret=true;
1549 for(j=0;j<sec_num;j++){
1550 p=data_buf+j*host->hw->nand_sec_size;
1551 sec_zero_count=0;
1552 for(i=0;i<host->hw->nand_sec_size;i++){
1553 t=p[i];
1554 t=~t;
1555 t=((t&0xaa)>>1) + (t&0x55);
1556 t=((t&0xcc)>>2)+(t&0x33);
1557 t=((t&0xf0f0)>>4)+(t&0x0f0f);
1558 sec_zero_count+=t;
1559 if(t>0){
1560 xlog_printk(ANDROID_LOG_INFO,"NFI", "there is %d bit filp at sector(%d): %d in empty page \n ",t,j,i);
1561 }
1562 }
1563 if(sec_zero_count > 2){
1564 xlog_printk(ANDROID_LOG_ERROR,"NFI","too many bit filp=%d @ page addr=0x%x, we can not return fake buf\n",sec_zero_count,u4PageAddr);
1565 ret=false;
1566 }
1567 }
1568 return ret;
1569}
1570#endif
1571/******************************************************************************
1572 * mtk_nand_check_bch_error
1573 *
1574 * DESCRIPTION:
 1575 * Check for BCH decode errors !
1576 *
1577 * PARAMETERS:
1578 * struct mtd_info *mtd
1579 * u8* pDataBuf
1580 * u32 u4SecIndex
1581 * u32 u4PageAddr
1582 *
1583 * RETURNS:
1584 * None
1585 *
1586 * NOTES:
1587 * None
1588 *
1589 ******************************************************************************/
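/*
 * Per-sector error counts are packed 8 bits per sector, four sectors per
 * ECC_DECENUM register; a count equal to ERR_NUM0 marks the sector as
 * uncorrectable. Pages flagged as empty (STA_READ_EMPTY) are reported clean
 * and the data/spare buffers are filled with 0xFF instead.
 */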
1590static bool mtk_nand_check_bch_error(struct mtd_info *mtd, u8 * pDataBuf,u8 * spareBuf,u32 u4SecIndex, u32 u4PageAddr, u32* bitmap)
1591{
1592 bool ret = true;
1593 u16 u2SectorDoneMask = 1 << u4SecIndex;
1594 u32 u4ErrorNumDebug0,u4ErrorNumDebug1, i, u4ErrNum;
1595 u32 timeout = 0xFFFF;
1596 u32 correct_count = 0;
1597 u32 page_size=(u4SecIndex+1)*host->hw->nand_sec_size;
1598 u32 sec_num=u4SecIndex+1;
1599 //u32 bitflips = sec_num * 39;
1600 u16 failed_sec=0;
1601 u32 maxSectorBitErr = 0;
1602
1603#ifdef MANUAL_CORRECT
1604 u32 au4ErrBitLoc[6];
1605 u32 u4ErrByteLoc, u4BitOffset;
1606 u32 u4ErrBitLoc1th, u4ErrBitLoc2nd;
1607#endif
1608
1609 while (0 == (u2SectorDoneMask & DRV_Reg16(ECC_DECDONE_REG16)))
1610 {
1611 timeout--;
1612 if (0 == timeout)
1613 {
1614 return false;
1615 }
1616 }
1617#ifndef MANUAL_CORRECT
1618 if(0 == (DRV_Reg32(NFI_STA_REG32) & STA_READ_EMPTY))
1619 {
1620 u4ErrorNumDebug0 = DRV_Reg32(ECC_DECENUM0_REG32);
1621 u4ErrorNumDebug1 = DRV_Reg32(ECC_DECENUM1_REG32);
1622 if (0 != (u4ErrorNumDebug0 & 0xFFFFFFFF) || 0 != (u4ErrorNumDebug1 & 0xFFFFFFFF))
1623 {
1624 for (i = 0; i <= u4SecIndex; ++i)
1625 {
1626#if 1
1627 u4ErrNum = (DRV_Reg32((ECC_DECENUM0_REG32+(i/4)))>>((i%4)*8))& ERR_NUM0;
1628#else
1629 if (i < 4)
1630 {
1631 u4ErrNum = DRV_Reg32(ECC_DECENUM0_REG32) >> (i * 8);
1632 } else
1633 {
1634 u4ErrNum = DRV_Reg32(ECC_DECENUM1_REG32) >> ((i - 4) * 8);
1635 }
1636 u4ErrNum &= ERR_NUM0;
1637#endif
1638 if (ERR_NUM0 == u4ErrNum)
1639 {
1640 failed_sec++;
1641 ret = false;
1642 //xlog_printk(ANDROID_LOG_WARN,"NFI", "UnCorrectable ECC errors at PageAddr=%d, Sector=%d\n", u4PageAddr, i);
1643 MSG(INIT,"UnCorrectable ECC errors at PageAddr=%d, Sector=%d\n", u4PageAddr, i);
1644 } else
1645 {
1646 if(bitmap)
1647 *bitmap |= 1 << i;
1648 if (u4ErrNum)
1649 {
1650 if(maxSectorBitErr < u4ErrNum)
1651 maxSectorBitErr = u4ErrNum;
1652 correct_count += u4ErrNum;
1653 // xlog_printk(ANDROID_LOG_INFO,"NFI"," In kernel Correct %d ECC error(s) at PageAddr=%d, Sector=%d\n", u4ErrNum, u4PageAddr, i);
1654 }
1655 }
1656 }
1657 mtd->ecc_stats.failed+=failed_sec;
1658 if ((maxSectorBitErr > ecc_threshold) && (FALSE != ret))
1659 {
1660 MSG(INIT,"ECC bit flips (0x%x) exceed eccthreshold (0x%x),u4PageAddr 0x%x\n",maxSectorBitErr,ecc_threshold,u4PageAddr);
1661 mtd->ecc_stats.corrected++;
1662 } else
1663 {
1664 //xlog_printk(ANDROID_LOG_INFO,"NFI", "Less than 39 bit error, ignore\n");
1665 }
1666 }
1667 }
1668
1669 if(0 != (DRV_Reg32(NFI_STA_REG32) & STA_READ_EMPTY))
1670 {
1671 ret=true;
1672 //MSG(INIT, "empty page, empty buffer returned\n");
1673 memset(pDataBuf,0xff,page_size);
1674 memset(spareBuf,0xff,sec_num*8);
1675 maxSectorBitErr = 0;
1676 failed_sec=0;
1677 }
1678
1679#else
1680 /* We will manually correct the error bits in the last sector, not all the sectors of the page! */
1681 memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc));
1682 u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM_REG32);
1683 u4ErrNum = (DRV_Reg32((ECC_DECENUM_REG32+(u4SecIndex/4)))>>((u4SecIndex%4)*8))& ERR_NUM0;
1684
1685 if (u4ErrNum)
1686 {
1687 if (ERR_NUM0 == u4ErrNum)
1688 {
1689 mtd->ecc_stats.failed++;
1690 ret = false;
1691 //printk(KERN_ERR"UnCorrectable at PageAddr=%d\n", u4PageAddr);
1692 } else
1693 {
1694 for (i = 0; i < ((u4ErrNum + 1) >> 1); ++i)
1695 {
1696 au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i);
1697 u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x3FFF;
1698
1699 if (u4ErrBitLoc1th < 0x1000)
1700 {
1701 u4ErrByteLoc = u4ErrBitLoc1th / 8;
1702 u4BitOffset = u4ErrBitLoc1th % 8;
1703 pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
1704 mtd->ecc_stats.corrected++;
1705 } else
1706 {
1707 mtd->ecc_stats.failed++;
1708 //printk(KERN_ERR"UnCorrectable ErrLoc=%d\n", au4ErrBitLoc[i]);
1709 }
1710 u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x3FFF;
1711 if (0 != u4ErrBitLoc2nd)
1712 {
1713 if (u4ErrBitLoc2nd < 0x1000)
1714 {
1715 u4ErrByteLoc = u4ErrBitLoc2nd / 8;
1716 u4BitOffset = u4ErrBitLoc2nd % 8;
1717 pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
1718 mtd->ecc_stats.corrected++;
1719 } else
1720 {
1721 mtd->ecc_stats.failed++;
1722 //printk(KERN_ERR"UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]);
1723 }
1724 }
1725 }
1726 }
1727 if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex)))
1728 {
1729 ret = false;
1730 }
1731 }
1732#endif
1733 return ret;
1734}
1735
1736/******************************************************************************
1737 * mtk_nand_RFIFOValidSize
1738 *
1739 * DESCRIPTION:
1740 * Check the Read FIFO data bytes !
1741 *
1742 * PARAMETERS:
1743 * u16 u2Size
1744 *
1745 * RETURNS:
1746 * None
1747 *
1748 * NOTES:
1749 * None
1750 *
1751 ******************************************************************************/
1752static bool mtk_nand_RFIFOValidSize(u16 u2Size)
1753{
1754 u32 timeout = 0xFFFF;
1755 while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size)
1756 {
1757 timeout--;
1758 if (0 == timeout)
1759 {
1760 return false;
1761 }
1762 }
1763 return true;
1764}
1765
1766/******************************************************************************
1767 * mtk_nand_WFIFOValidSize
1768 *
1769 * DESCRIPTION:
1770 * Check the Write FIFO data bytes !
1771 *
1772 * PARAMETERS:
1773 * u16 u2Size
1774 *
1775 * RETURNS:
1776 * None
1777 *
1778 * NOTES:
1779 * None
1780 *
1781 ******************************************************************************/
1782static bool mtk_nand_WFIFOValidSize(u16 u2Size)
1783{
1784 u32 timeout = 0xFFFF;
1785 while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size)
1786 {
1787 timeout--;
1788 if (0 == timeout)
1789 {
1790 return false;
1791 }
1792 }
1793 return true;
1794}
1795
1796/******************************************************************************
1797 * mtk_nand_status_ready
1798 *
1799 * DESCRIPTION:
 1800 * Indicate whether the NAND device is ready !
1801 *
1802 * PARAMETERS:
1803 * u32 u4Status
1804 *
1805 * RETURNS:
1806 * None
1807 *
1808 * NOTES:
1809 * None
1810 *
1811 ******************************************************************************/
1812static bool mtk_nand_status_ready(u32 u4Status)
1813{
1814 u32 timeout = 0xFFFF;
1815 while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0)
1816 {
1817 timeout--;
1818 if (0 == timeout)
1819 {
1820 return false;
1821 }
1822 }
1823 return true;
1824}
1825
1826/******************************************************************************
1827 * mtk_nand_reset
1828 *
1829 * DESCRIPTION:
1830 * Reset the NAND device hardware component !
1831 *
1832 * PARAMETERS:
1833 * struct mtk_nand_host *host (Initial setting data)
1834 *
1835 * RETURNS:
1836 * None
1837 *
1838 * NOTES:
1839 * None
1840 *
1841 ******************************************************************************/
1842static bool mtk_nand_reset(void)
1843{
1844 // HW recommended reset flow
1845 int timeout = 0xFFFF;
1846 if (DRV_Reg16(NFI_MASTERSTA_REG16) & 0xFFF) // master is busy
1847 {
1848 mb();
1849 DRV_WriteReg32(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
1850 while (DRV_Reg16(NFI_MASTERSTA_REG16) & 0xFFF)
1851 {
1852 timeout--;
1853 if (!timeout)
1854 {
1855 MSG(INIT, "Wait for NFI_MASTERSTA timeout\n");
1856 }
1857 }
1858 }
1859 /* issue reset operation */
1860 mb();
1861 DRV_WriteReg32(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
1862
1863 return mtk_nand_status_ready(STA_NFI_FSM_MASK | STA_NAND_BUSY) && mtk_nand_RFIFOValidSize(0) && mtk_nand_WFIFOValidSize(0);
1864}
1865
1866/******************************************************************************
1867 * mtk_nand_set_mode
1868 *
1869 * DESCRIPTION:
1870 * Set the oepration mode !
1871 *
1872 * PARAMETERS:
1873 * u16 u2OpMode (read/write)
1874 *
1875 * RETURNS:
1876 * None
1877 *
1878 * NOTES:
1879 * None
1880 *
1881 ******************************************************************************/
1882static void mtk_nand_set_mode(u16 u2OpMode)
1883{
1884 u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16);
1885 u2Mode &= ~CNFG_OP_MODE_MASK;
1886 u2Mode |= u2OpMode;
1887 DRV_WriteReg16(NFI_CNFG_REG16, u2Mode);
1888}
1889
1890/******************************************************************************
1891 * mtk_nand_set_autoformat
1892 *
1893 * DESCRIPTION:
1894 * Enable/Disable hardware autoformat !
1895 *
1896 * PARAMETERS:
1897 * bool bEnable (Enable/Disable)
1898 *
1899 * RETURNS:
1900 * None
1901 *
1902 * NOTES:
1903 * None
1904 *
1905 ******************************************************************************/
1906static void mtk_nand_set_autoformat(bool bEnable)
1907{
1908 if (bEnable)
1909 {
1910 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
1911 } else
1912 {
1913 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
1914 }
1915}
1916
1917/******************************************************************************
1918 * mtk_nand_configure_fdm
1919 *
1920 * DESCRIPTION:
1921 * Configure the FDM data size !
1922 *
1923 * PARAMETERS:
1924 * u16 u2FDMSize
1925 *
1926 * RETURNS:
1927 * None
1928 *
1929 * NOTES:
1930 * None
1931 *
1932 ******************************************************************************/
1933static void mtk_nand_configure_fdm(u16 u2FDMSize)
1934{
1935 NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK);
1936 NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT);
1937 NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT);
1938}
1939
1940
1941static bool mtk_nand_pio_ready(void)
1942{
1943 int count = 0;
1944 while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1))
1945 {
1946 count++;
1947 if (count > 0xffff)
1948 {
1949 printk("PIO_DIRDY timeout\n");
1950 return false;
1951 }
1952 }
1953
1954 return true;
1955}
1956
1957/******************************************************************************
1958 * mtk_nand_set_command
1959 *
1960 * DESCRIPTION:
1961 * Send hardware commands to NAND devices !
1962 *
1963 * PARAMETERS:
1964 * u16 command
1965 *
1966 * RETURNS:
1967 *   bool : true if the command phase completed, false on timeout
1968 *
1969 * NOTES:
1970 * None
1971 *
1972 ******************************************************************************/
1973static bool mtk_nand_set_command(u16 command)
1974{
1975 /* Write command to device */
1976 mb();
1977 DRV_WriteReg16(NFI_CMD_REG16, command);
1978 return mtk_nand_status_ready(STA_CMD_STATE);
1979}
1980
1981/******************************************************************************
1982 * mtk_nand_set_address
1983 *
1984 * DESCRIPTION:
1985 * Set the hardware address register !
1986 *
1987 * PARAMETERS:
1988 *   u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB
1989 *
1990 * RETURNS:
1991 *   bool : true if the address phase completed, false on timeout
1992 *
1993 * NOTES:
1994 * None
1995 *
1996 ******************************************************************************/
1997static bool mtk_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB)
1998{
1999 /* fill cycle addr */
2000 mb();
2001 DRV_WriteReg32(NFI_COLADDR_REG32, u4ColAddr);
2002 DRV_WriteReg32(NFI_ROWADDR_REG32, u4RowAddr);
2003 DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT));
2004 return mtk_nand_status_ready(STA_ADDR_STATE);
2005}
2006
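/*
 * Illustrative sketch only (#if 0): how a page read address is issued with
 * the helpers above. The column address selects the byte within the page and
 * the row address selects the page; this driver always sends 2 column cycles
 * and (devinfo.addr_cycle - 2) row cycles, exactly as
 * mtk_nand_ready_for_read() does below.
 */
#if 0
static bool example_issue_read_address(u32 page, u32 byte_in_page)
{
    u16 colnob = 2;
    u16 rownob = devinfo.addr_cycle - 2;

    return mtk_nand_set_command(NAND_CMD_READ0) &&
           mtk_nand_set_address(byte_in_page, page, colnob, rownob) &&
           mtk_nand_set_command(NAND_CMD_READSTART) &&
           mtk_nand_status_ready(STA_NAND_BUSY);
}
#endif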
2007//-------------------------------------------------------------------------------
2008static bool mtk_nand_device_reset(void)
2009{
2010 u32 timeout = 0xFFFF;
2011
2012 mtk_nand_reset();
2013
2014 DRV_WriteReg(NFI_CNFG_REG16, CNFG_OP_RESET);
2015
2016 mtk_nand_set_command(NAND_CMD_RESET);
2017
2018 while(!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN) && (timeout--));
2019
2020 if(!timeout)
2021 return FALSE;
2022 else
2023 return TRUE;
2024}
2025//-------------------------------------------------------------------------------
2026
2027/******************************************************************************
2028 * mtk_nand_check_RW_count
2029 *
2030 * DESCRIPTION:
2031 *   Check that the expected number of sectors has been transferred !
2032 *
2033 * PARAMETERS:
2034 * u16 u2WriteSize
2035 *
2036 * RETURNS:
2037 *   bool : true once the sector count is reached, false on timeout
2038 *
2039 * NOTES:
2040 * None
2041 *
2042 ******************************************************************************/
2043static bool mtk_nand_check_RW_count(u16 u2WriteSize)
2044{
2045 u32 timeout = 0xFFFF;
2046 u16 u2SecNum = u2WriteSize >> host->hw->nand_sec_shift;
2047
2048 while (ADDRCNTR_CNTR(DRV_Reg32(NFI_ADDRCNTR_REG16)) < u2SecNum)
2049 {
2050 timeout--;
2051 if (0 == timeout)
2052 {
2053 printk(KERN_INFO "[%s] timeout\n", __FUNCTION__);
2054 return false;
2055 }
2056 }
2057 return true;
2058}
2059
2060/******************************************************************************
2061 * mtk_nand_ready_for_read
2062 *
2063 * DESCRIPTION:
2064 * Prepare hardware environment for read !
2065 *
2066 * PARAMETERS:
2067 *   struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, u16 sec_num, bool full, u8 *buf
2068 *
2069 * RETURNS:
2070 *   bool : true if the read setup succeeded, false otherwise
2071 *
2072 * NOTES:
2073 * None
2074 *
2075 ******************************************************************************/
2076static bool mtk_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, u16 sec_num, bool full, u8 * buf)
2077{
2078 /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
2079 bool bRet = false;
2080 //u16 sec_num = 1 << (nand->page_shift - host->hw->nand_sec_shift);
2081 u32 col_addr = u4ColAddr;
2082 u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
2083 //u32 reg_val = DRV_Reg32(NFI_MASTERRST_REG32);
2084#if __INTERNAL_USE_AHB_MODE__
2085 u32 phys = 0;
2086#endif
2087#if CFG_PERFLOG_DEBUG
2088 struct timeval stimer,etimer;
2089 do_gettimeofday(&stimer);
2090#endif
2091 if(DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32)&0x3)
2092 {
2093 NFI_SET_REG16(NFI_MASTERRST_REG32, PAD_MACRO_RST);//reset
2094 NFI_CLN_REG16(NFI_MASTERRST_REG32, PAD_MACRO_RST);//dereset
2095 }
2096
2097 if (nand->options & NAND_BUSWIDTH_16)
2098 col_addr /= 2;
2099
2100 if (!mtk_nand_reset())
2101 {
2102 goto cleanup;
2103 }
2104 if (g_bHwEcc)
2105 {
2106 /* Enable HW ECC */
2107 NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2108 } else
2109 {
2110 NFI_CLN_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2111 }
2112
2113 mtk_nand_set_mode(CNFG_OP_READ);
2114 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
2115 DRV_WriteReg32(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
2116
2117 if (full)
2118 {
2119#if __INTERNAL_USE_AHB_MODE__
2120 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
2121 phys = nand_virt_to_phys_add((u32) buf);
2122 if (!phys)
2123 {
2124 printk(KERN_ERR "[mtk_nand_ready_for_read]convert virt addr (%x) to phys add (%x)fail!!!", (u32) buf, phys);
2125 return false;
2126 } else
2127 {
2128 DRV_WriteReg32(NFI_STRADDR_REG32, phys);
2129 }
2130#else
2131 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2132#endif
2133
2134 if (g_bHwEcc)
2135 {
2136 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2137 } else
2138 {
2139 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2140 }
2141
2142 } else
2143 {
2144 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2145 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2146 }
2147
2148 mtk_nand_set_autoformat(full);
2149 if (full)
2150 {
2151 if (g_bHwEcc)
2152 {
2153 ECC_Decode_Start();
2154 }
2155 }
2156 if (!mtk_nand_set_command(NAND_CMD_READ0))
2157 {
2158 goto cleanup;
2159 }
2160 if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
2161 {
2162 goto cleanup;
2163 }
2164
2165 if (!mtk_nand_set_command(NAND_CMD_READSTART))
2166 {
2167 goto cleanup;
2168 }
2169
2170 if (!mtk_nand_status_ready(STA_NAND_BUSY))
2171 {
2172 goto cleanup;
2173 }
2174
2175 bRet = true;
2176
2177 cleanup:
2178 #if CFG_PERFLOG_DEBUG
2179 do_gettimeofday(&etimer);
2180 g_NandPerfLog.ReadBusyTotalTime+= Cal_timediff(&etimer,&stimer);
2181 g_NandPerfLog.ReadBusyCount++;
2182 #endif
2183 return bRet;
2184}
2185
2186/******************************************************************************
2187 * mtk_nand_ready_for_write
2188 *
2189 * DESCRIPTION:
2190 * Prepare hardware environment for write !
2191 *
2192 * PARAMETERS:
2193 *   struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 *buf
2194 *
2195 * RETURNS:
2196 *   bool : true if the write setup succeeded, false otherwise
2197 *
2198 * NOTES:
2199 * None
2200 *
2201 ******************************************************************************/
2202static bool mtk_nand_ready_for_write(struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 * buf)
2203{
2204 bool bRet = false;
2205 u32 sec_num = 1 << (nand->page_shift - host->hw->nand_sec_shift);
2206 u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
2207#if __INTERNAL_USE_AHB_MODE__
2208 u32 phys = 0;
2209 //u32 T_phys=0;
2210#endif
2211 if (nand->options & NAND_BUSWIDTH_16)
2212 col_addr /= 2;
2213
2214 /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
2215 if (!mtk_nand_reset())
2216 {
2217 printk("[Bean]mtk_nand_ready_for_write (mtk_nand_reset) fail!\n");
2218 return false;
2219 }
2220
2221 mtk_nand_set_mode(CNFG_OP_PRGM);
2222
2223 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
2224
2225 DRV_WriteReg32(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
2226
2227 if (full)
2228 {
2229#if __INTERNAL_USE_AHB_MODE__
2230 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
2231 phys = nand_virt_to_phys_add((u32) buf);
2232 //T_phys=__virt_to_phys(buf);
2233 if (!phys)
2234 {
2235            printk(KERN_ERR "[mtk_nand_ready_for_write]convert virt addr (%x) to phys add fail!!!", (u32) buf);
2236 return false;
2237 } else
2238 {
2239 DRV_WriteReg32(NFI_STRADDR_REG32, phys);
2240 }
2241#if 0
2242 if ((T_phys > 0x700000 && T_phys < 0x800000) || (phys > 0x700000 && phys < 0x800000))
2243 {
2244 {
2245 printk("[NFI_WRITE]ERROR: Forbidden AHB address wrong phys address =0x%x , right phys address=0x%x, virt address= 0x%x (count = %d)\n", T_phys, phys, (u32) buf, g_dump_count++);
2246 show_stack(NULL, NULL);
2247 }
2248 BUG_ON(1);
2249 }
2250#endif
2251#else
2252 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2253#endif
2254 if (g_bHwEcc)
2255 {
2256 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2257 } else
2258 {
2259 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2260 }
2261 } else
2262 {
2263 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2264 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2265 }
2266
2267 mtk_nand_set_autoformat(full);
2268
2269 if (full)
2270 {
2271 if (g_bHwEcc)
2272 {
2273 ECC_Encode_Start();
2274 }
2275 }
2276
2277 if (!mtk_nand_set_command(NAND_CMD_SEQIN))
2278 {
2279 printk("[Bean]mtk_nand_ready_for_write (mtk_nand_set_command) fail!\n");
2280 goto cleanup;
2281 }
2282    //1 FIXME: For Any Kind of Address Cycle
2283 if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
2284 {
2285 printk("[Bean]mtk_nand_ready_for_write (mtk_nand_set_address) fail!\n");
2286 goto cleanup;
2287 }
2288
2289 if (!mtk_nand_status_ready(STA_NAND_BUSY))
2290 {
2291 printk("[Bean]mtk_nand_ready_for_write (mtk_nand_status_ready) fail!\n");
2292 goto cleanup;
2293 }
2294
2295 bRet = true;
2296 cleanup:
2297
2298 return bRet;
2299}
2300
2301static bool mtk_nand_check_dececc_done(u32 u4SecNum)
2302{
2303 u32 dec_mask;
2304 struct timeval timer_timeout, timer_cur;
2305 do_gettimeofday(&timer_timeout);
2306
2307    timer_timeout.tv_usec += 800 * 1000; // 800 ms timeout budget
2308 if (timer_timeout.tv_usec >= 1000000) // 1 second
2309 {
2310 timer_timeout.tv_usec -= 1000000;
2311 timer_timeout.tv_sec += 1;
2312 }
2313
2314 dec_mask = (1 << (u4SecNum - 1));
2315 while (dec_mask != (DRV_Reg(ECC_DECDONE_REG16) & dec_mask))
2316 {
2317 do_gettimeofday(&timer_cur);
2318 if (timeval_compare(&timer_cur, &timer_timeout) >= 0)
2319 {
2320 MSG(INIT, "ECC_DECDONE: timeout 0x%x %d\n",DRV_Reg(ECC_DECDONE_REG16),u4SecNum);
2321 dump_nfi();
2322 return false;
2323 }
2324 }
2325 while (DRV_Reg32(ECC_DECFSM_REG32) != ECC_DECFSM_IDLE)
2326 {
2327 do_gettimeofday(&timer_cur);
2328 if (timeval_compare(&timer_cur, &timer_timeout) >= 0)
2329 {
2330 MSG(INIT, "ECC_DECDONE: timeout 0x%x %d\n",DRV_Reg(ECC_DECDONE_REG16),u4SecNum);
2331 dump_nfi();
2332 return false;
2333 }
2334 }
2335 return true;
2336}
2337
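/*
 * Illustrative sketch only (#if 0): the wall-clock timeout pattern used by
 * mtk_nand_check_dececc_done() above, isolated into one helper. Take the
 * current time, add the budget (less than one second) in microseconds, then
 * poll until timeval_compare() reports the deadline has passed.
 */
#if 0
static bool example_poll_with_deadline(bool (*condition)(void), u32 budget_us)
{
    struct timeval deadline, now;

    do_gettimeofday(&deadline);
    deadline.tv_usec += budget_us;
    if (deadline.tv_usec >= 1000000) {
        deadline.tv_usec -= 1000000;
        deadline.tv_sec += 1;
    }

    while (!condition()) {
        do_gettimeofday(&now);
        if (timeval_compare(&now, &deadline) >= 0)
            return false;   /* deadline exceeded */
    }
    return true;
}
#endif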
2338/******************************************************************************
2339 * mtk_nand_read_page_data
2340 *
2341 * DESCRIPTION:
2342 *   Read the page data into the buffer !
2343 *
2344 * PARAMETERS:
2345 * u8* pDataBuf, u32 u4Size
2346 *
2347 * RETURNS:
2348 * None
2349 *
2350 * NOTES:
2351 * None
2352 *
2353 ******************************************************************************/
2354static bool mtk_nand_dma_read_data(struct mtd_info *mtd, u8 * buf, u32 length)
2355{
2356 int interrupt_en = g_i4Interrupt;
2357 int timeout = 0xfffff;
2358 struct scatterlist sg;
2359 enum dma_data_direction dir = DMA_FROM_DEVICE;
2360#if CFG_PERFLOG_DEBUG
2361 struct timeval stimer,etimer;
2362 do_gettimeofday(&stimer);
2363#endif
2364 sg_init_one(&sg, buf, length);
2365 dma_map_sg(&(mtd->dev), &sg, 1, dir);
2366
2367 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2368 // DRV_WriteReg32(NFI_STRADDR_REG32, __virt_to_phys(pDataBuf));
2369
2370    if ((unsigned int)buf % 16) // TODO: cannot use DMA burst mode with an unaligned buffer
2371    {
2372        printk(KERN_INFO "Non-16-byte-aligned address\n");
2373 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
2374 } else
2375 {
2376 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
2377 }
2378
2379 DRV_Reg16(NFI_INTR_REG16);
2380 DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_AHB_DONE_EN);
2381
2382 if (interrupt_en)
2383 {
2384 init_completion(&g_comp_AHB_Done);
2385 }
2386 //dmac_inv_range(pDataBuf, pDataBuf + u4Size);
2387 mb();
2388 NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BRD);
2389 g_running_dma = 1;
2390
2391 if (interrupt_en)
2392 {
2393        // Wait up to 50 jiffies for the AHB-done interrupt
2394 if (!wait_for_completion_timeout(&g_comp_AHB_Done, 50))
2395 {
2396 MSG(INIT, "wait for completion timeout happened @ [%s]: %d\n", __FUNCTION__, __LINE__);
2397 dump_nfi();
2398 g_running_dma = 0;
2399 return false;
2400 }
2401 g_running_dma = 0;
2402 while ((length >> host->hw->nand_sec_shift) > ((DRV_Reg32(NFI_BYTELEN_REG16) & 0x1f000) >> 12))
2403 {
2404 timeout--;
2405 if (0 == timeout)
2406 {
2407 printk(KERN_ERR "[%s] poll BYTELEN error\n", __FUNCTION__);
2408 g_running_dma = 0;
2409 return false; //4 // AHB Mode Time Out!
2410 }
2411 }
2412 } else
2413 {
2414 while (!DRV_Reg16(NFI_INTR_REG16))
2415 {
2416 timeout--;
2417 if (0 == timeout)
2418 {
2419 printk(KERN_ERR "[%s] poll nfi_intr error\n", __FUNCTION__);
2420 dump_nfi();
2421 g_running_dma = 0;
2422 return false; //4 // AHB Mode Time Out!
2423 }
2424 }
2425 g_running_dma = 0;
2426 while ((length >> host->hw->nand_sec_shift) > ((DRV_Reg32(NFI_BYTELEN_REG16) & 0x1f000) >> 12))
2427 {
2428 timeout--;
2429 if (0 == timeout)
2430 {
2431 printk(KERN_ERR "[%s] poll BYTELEN error\n", __FUNCTION__);
2432 dump_nfi();
2433 g_running_dma = 0;
2434 return false; //4 // AHB Mode Time Out!
2435 }
2436 }
2437 }
2438
2439 dma_unmap_sg(&(mtd->dev), &sg, 1, dir);
2440#if CFG_PERFLOG_DEBUG
2441 do_gettimeofday(&etimer);
2442 g_NandPerfLog.ReadDMATotalTime+= Cal_timediff(&etimer,&stimer);
2443 g_NandPerfLog.ReadDMACount++;
2444#endif
2445 return true;
2446}
2447
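/*
 * Illustrative sketch only (#if 0): the bare scatterlist skeleton that
 * mtk_nand_dma_read_data() above fills in with NFI specifics. The buffer must
 * be DMA-able (not vmalloc'ed) and should be 16-byte aligned for burst mode.
 */
#if 0
static bool example_dma_transfer(struct mtd_info *mtd, u8 *buf, u32 length,
                                 enum dma_data_direction dir)
{
    struct scatterlist sg;
    bool ok;

    sg_init_one(&sg, buf, length);
    dma_map_sg(&(mtd->dev), &sg, 1, dir);

    /* ...program NFI_STRADDR/NFI_CON and wait for the transfer here... */
    ok = true;

    dma_unmap_sg(&(mtd->dev), &sg, 1, dir);
    return ok;
}
#endif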
2448static bool mtk_nand_mcu_read_data(u8 * buf, u32 length)
2449{
2450 int timeout = 0xffff;
2451 u32 i;
2452 u32 *buf32 = (u32 *) buf;
2453#ifdef TESTTIME
2454 unsigned long long time1, time2;
2455 time1 = sched_clock();
2456#endif
2457 if ((u32) buf % 4 || length % 4)
2458 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2459 else
2460 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2461
2462 //DRV_WriteReg32(NFI_STRADDR_REG32, 0);
2463 mb();
2464 NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BRD);
2465
2466 if ((u32) buf % 4 || length % 4)
2467 {
2468 for (i = 0; (i < (length)) && (timeout > 0);)
2469 {
2470 //if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4)
2471 if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
2472 {
2473 *buf++ = (u8) DRV_Reg32(NFI_DATAR_REG32);
2474 i++;
2475 } else
2476 {
2477 timeout--;
2478 }
2479 if (0 == timeout)
2480 {
2481 printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2482 dump_nfi();
2483 return false;
2484 }
2485 }
2486 } else
2487 {
2488 for (i = 0; (i < (length >> 2)) && (timeout > 0);)
2489 {
2490 //if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4)
2491 if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
2492 {
2493 *buf32++ = DRV_Reg32(NFI_DATAR_REG32);
2494 i++;
2495 } else
2496 {
2497 timeout--;
2498 }
2499 if (0 == timeout)
2500 {
2501 printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2502 dump_nfi();
2503 return false;
2504 }
2505 }
2506 }
2507#ifdef TESTTIME
2508 time2 = sched_clock() - time1;
2509 if (!readdatatime)
2510 {
2511 readdatatime = (time2);
2512 }
2513#endif
2514 return true;
2515}
2516
2517static bool mtk_nand_read_page_data(struct mtd_info *mtd, u8 * pDataBuf, u32 u4Size)
2518{
2519#if (__INTERNAL_USE_AHB_MODE__)
2520 return mtk_nand_dma_read_data(mtd, pDataBuf, u4Size);
2521#else
2522    return mtk_nand_mcu_read_data(pDataBuf, u4Size);
2523#endif
2524}
2525
2526/******************************************************************************
2527 * mtk_nand_write_page_data
2528 *
2529 * DESCRIPTION:
2530 *   Write the page data from the buffer to the device !
2531 *
2532 * PARAMETERS:
2533 * u8* pDataBuf, u32 u4Size
2534 *
2535 * RETURNS:
2536 * None
2537 *
2538 * NOTES:
2539 * None
2540 *
2541 ******************************************************************************/
2542static bool mtk_nand_dma_write_data(struct mtd_info *mtd, u8 * pDataBuf, u32 u4Size)
2543{
2544 int i4Interrupt = 0; //g_i4Interrupt;
2545 u32 timeout = 0xFFFF;
2546 struct scatterlist sg;
2547 enum dma_data_direction dir = DMA_TO_DEVICE;
2548#if CFG_PERFLOG_DEBUG
2549 struct timeval stimer,etimer;
2550 do_gettimeofday(&stimer);
2551#endif
2552 sg_init_one(&sg, pDataBuf, u4Size);
2553 dma_map_sg(&(mtd->dev), &sg, 1, dir);
2554
2555 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2556 DRV_Reg16(NFI_INTR_REG16);
2557 DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
2558 // DRV_WriteReg32(NFI_STRADDR_REG32, (u32*)virt_to_phys(pDataBuf));
2559
2560    if ((unsigned int)pDataBuf % 16)    // TODO: cannot use DMA burst mode with an unaligned buffer
2561    {
2562        printk(KERN_INFO "Non-16-byte-aligned address\n");
2563 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
2564 } else
2565 {
2566 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
2567 }
2568
2569 if (i4Interrupt)
2570 {
2571 init_completion(&g_comp_AHB_Done);
2572 DRV_Reg16(NFI_INTR_REG16);
2573 DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_AHB_DONE_EN);
2574 }
2575 //dmac_clean_range(pDataBuf, pDataBuf + u4Size);
2576 mb();
2577 NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
2578 g_running_dma = 3;
2579 if (i4Interrupt)
2580 {
2581        // Wait up to 10 jiffies for the AHB-done interrupt
2582 if (!wait_for_completion_timeout(&g_comp_AHB_Done, 10))
2583 {
2584 MSG(READ, "wait for completion timeout happened @ [%s]: %d\n", __FUNCTION__, __LINE__);
2585 dump_nfi();
2586 g_running_dma = 0;
2587 return false;
2588 }
2589 g_running_dma = 0;
2590 // wait_for_completion(&g_comp_AHB_Done);
2591 } else
2592 {
2593 while ((u4Size >> host->hw->nand_sec_shift) > ((DRV_Reg32(NFI_BYTELEN_REG16) & 0x1f000) >> 12))
2594 {
2595 timeout--;
2596 if (0 == timeout)
2597 {
2598 printk(KERN_ERR "[%s] poll BYTELEN error\n", __FUNCTION__);
2599 g_running_dma = 0;
2600 return false; //4 // AHB Mode Time Out!
2601 }
2602 }
2603 g_running_dma = 0;
2604 }
2605
2606 dma_unmap_sg(&(mtd->dev), &sg, 1, dir);
2607#if CFG_PERFLOG_DEBUG
2608 do_gettimeofday(&etimer);
2609 g_NandPerfLog.WriteDMATotalTime+= Cal_timediff(&etimer,&stimer);
2610 g_NandPerfLog.WriteDMACount++;
2611#endif
2612 return true;
2613}
2614
2615static bool mtk_nand_mcu_write_data(struct mtd_info *mtd, const u8 * buf, u32 length)
2616{
2617 u32 timeout = 0xFFFF;
2618 u32 i;
2619 u32 *pBuf32;
2620 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2621 mb();
2622 NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
2623 pBuf32 = (u32 *) buf;
2624
2625 if ((u32) buf % 4 || length % 4)
2626 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2627 else
2628 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2629
2630 if ((u32) buf % 4 || length % 4)
2631 {
2632 for (i = 0; (i < (length)) && (timeout > 0);)
2633 {
2634 if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
2635 {
2636 DRV_WriteReg32(NFI_DATAW_REG32, *buf++);
2637 i++;
2638 } else
2639 {
2640 timeout--;
2641 }
2642 if (0 == timeout)
2643 {
2644 printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2645 dump_nfi();
2646 return false;
2647 }
2648 }
2649 } else
2650 {
2651 for (i = 0; (i < (length >> 2)) && (timeout > 0);)
2652 {
2653 // if (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) <= 12)
2654 if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
2655 {
2656 DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++);
2657 i++;
2658 } else
2659 {
2660 timeout--;
2661 }
2662 if (0 == timeout)
2663 {
2664 printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2665 dump_nfi();
2666 return false;
2667 }
2668 }
2669 }
2670
2671 return true;
2672}
2673
2674static bool mtk_nand_write_page_data(struct mtd_info *mtd, u8 * buf, u32 size)
2675{
2676#if (__INTERNAL_USE_AHB_MODE__)
2677 return mtk_nand_dma_write_data(mtd, buf, size);
2678#else
2679 return mtk_nand_mcu_write_data(mtd, buf, size);
2680#endif
2681}
2682
2683/******************************************************************************
2684 * mtk_nand_read_fdm_data
2685 *
2686 * DESCRIPTION:
2687 *   Read the FDM (spare area) data !
2688 *
2689 * PARAMETERS:
2690 * u8* pDataBuf, u32 u4SecNum
2691 *
2692 * RETURNS:
2693 * None
2694 *
2695 * NOTES:
2696 * None
2697 *
2698 ******************************************************************************/
2699static void mtk_nand_read_fdm_data(u8 * pDataBuf, u32 u4SecNum)
2700{
2701 u32 i;
2702 u32 *pBuf32 = (u32 *) pDataBuf;
2703
2704 if (pBuf32)
2705 {
2706 for (i = 0; i < u4SecNum; ++i)
2707 {
2708 *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1));
2709 *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1));
2710 //*pBuf32++ = DRV_Reg32((u32)NFI_FDM0L_REG32 + (i<<3));
2711 //*pBuf32++ = DRV_Reg32((u32)NFI_FDM0M_REG32 + (i<<3));
2712 }
2713 }
2714}
2715
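/*
 * Illustrative sketch only (#if 0): each sector owns 8 bytes of FDM (spare)
 * data exposed as one low/high 32-bit register pair, which is why
 * mtk_nand_read_fdm_data() above advances by (i << 1) registers per sector.
 */
#if 0
static void example_dump_fdm(u32 sec_num)
{
    u32 i;

    for (i = 0; i < sec_num; i++) {
        u32 lo = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1));
        u32 hi = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1));
        printk(KERN_INFO "FDM sec %u: %08x %08x\n", i, lo, hi);
    }
}
#endif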
2716/******************************************************************************
2717 * mtk_nand_write_fdm_data
2718 *
2719 * DESCRIPTION:
2720 *   Write the FDM (spare area) data !
2721 *
2722 * PARAMETERS:
2723 *   struct nand_chip *chip, u8 *pDataBuf, u32 u4SecNum
2724 *
2725 * RETURNS:
2726 * None
2727 *
2728 * NOTES:
2729 * None
2730 *
2731 ******************************************************************************/
2732static u8 fdm_buf[128];
2733static void mtk_nand_write_fdm_data(struct nand_chip *chip, u8 * pDataBuf, u32 u4SecNum)
2734{
2735 u32 i, j;
2736 u8 checksum = 0;
2737 bool empty = true;
2738 struct nand_oobfree *free_entry;
2739 u32 *pBuf32;
2740
2741 memcpy(fdm_buf, pDataBuf, u4SecNum * 8);
2742
2743 free_entry = chip->ecc.layout->oobfree;
2744 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++)
2745 {
2746 for (j = 0; j < free_entry[i].length; j++)
2747 {
2748 if (pDataBuf[free_entry[i].offset + j] != 0xFF)
2749 empty = false;
2750 checksum ^= pDataBuf[free_entry[i].offset + j];
2751 }
2752 }
2753
2754 if (!empty)
2755 {
2756 fdm_buf[free_entry[i - 1].offset + free_entry[i - 1].length] = checksum;
2757 }
2758
2759 pBuf32 = (u32 *) fdm_buf;
2760 for (i = 0; i < u4SecNum; ++i)
2761 {
2762 DRV_WriteReg32(NFI_FDM0L_REG32 + (i << 1), *pBuf32++);
2763 DRV_WriteReg32(NFI_FDM0M_REG32 + (i << 1), *pBuf32++);
2764 //DRV_WriteReg32((u32)NFI_FDM0L_REG32 + (i<<3), *pBuf32++);
2765 //DRV_WriteReg32((u32)NFI_FDM0M_REG32 + (i<<3), *pBuf32++);
2766 }
2767}
2768
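/*
 * Illustrative sketch only (#if 0): the OOB checksum rule implemented by
 * mtk_nand_write_fdm_data() above. XOR every byte of the free OOB regions;
 * if any byte differs from 0xFF, the checksum is stored right after the last
 * free region before the FDM registers are programmed.
 */
#if 0
static u8 example_oobfree_checksum(struct nand_chip *chip, const u8 *oob,
                                   bool *empty)
{
    struct nand_oobfree *free_entry = chip->ecc.layout->oobfree;
    u8 checksum = 0;
    u32 i, j;

    *empty = true;
    for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++) {
        for (j = 0; j < free_entry[i].length; j++) {
            if (oob[free_entry[i].offset + j] != 0xFF)
                *empty = false;
            checksum ^= oob[free_entry[i].offset + j];
        }
    }
    return checksum;
}
#endif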
2769/******************************************************************************
2770 * mtk_nand_stop_read
2771 *
2772 * DESCRIPTION:
2773 * Stop read operation !
2774 *
2775 * PARAMETERS:
2776 * None
2777 *
2778 * RETURNS:
2779 * None
2780 *
2781 * NOTES:
2782 * None
2783 *
2784 ******************************************************************************/
2785static void mtk_nand_stop_read(void)
2786{
2787 NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_BRD);
2788 mtk_nand_reset();
2789 if (g_bHwEcc)
2790 {
2791 ECC_Decode_End();
2792 }
2793 DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
2794}
2795
2796/******************************************************************************
2797 * mtk_nand_stop_write
2798 *
2799 * DESCRIPTION:
2800 * Stop write operation !
2801 *
2802 * PARAMETERS:
2803 * None
2804 *
2805 * RETURNS:
2806 * None
2807 *
2808 * NOTES:
2809 * None
2810 *
2811 ******************************************************************************/
2812static void mtk_nand_stop_write(void)
2813{
2814 NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_BWR);
2815 if (g_bHwEcc)
2816 {
2817 ECC_Encode_End();
2818 }
2819 DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
2820}
2821
2822//---------------------------------------------------------------------------
2823#define STATUS_READY (0x40)
2824#define STATUS_FAIL (0x01)
2825#define STATUS_WR_ALLOW (0x80)
2826
2827static bool mtk_nand_read_status(void)
2828{
2829 int status = 0;//, i;
2830 unsigned int timeout;
2831
2832 mtk_nand_reset();
2833
2834 /* Disable HW ECC */
2835 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2836
2837 /* Disable 16-bit I/O */
2838 NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
2839 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_OP_SRD | CNFG_READ_EN | CNFG_BYTE_RW);
2840
2841 DRV_WriteReg32(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT));
2842
2843 DRV_WriteReg32(NFI_CON_REG16, 0x3);
2844 mtk_nand_set_mode(CNFG_OP_SRD);
2845 DRV_WriteReg16(NFI_CNFG_REG16, 0x2042);
2846 mtk_nand_set_command(NAND_CMD_STATUS);
2847 DRV_WriteReg32(NFI_CON_REG16, 0x90);
2848
2849 timeout = TIMEOUT_4;
2850 WAIT_NFI_PIO_READY(timeout);
2851
2852 if (timeout)
2853 {
2854 status = (DRV_Reg16(NFI_DATAR_REG32));
2855 }
2856 //~ clear NOB
2857 DRV_WriteReg32(NFI_CON_REG16, 0);
2858
2859 if (devinfo.iowidth == 16)
2860 {
2861 NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
2862 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2863 }
2864 // check READY/BUSY status first
2865 if (!(STATUS_READY & status))
2866 {
2867 //MSG(ERR, "status is not ready\n");
2868 }
2869 // flash is ready now, check status code
2870 if (STATUS_FAIL & status)
2871 {
2872 if (!(STATUS_WR_ALLOW & status))
2873 {
2874 //MSG(INIT, "status locked\n");
2875 return FALSE;
2876 } else
2877 {
2878 //MSG(INIT, "status unknown\n");
2879 return FALSE;
2880 }
2881 } else
2882 {
2883 return TRUE;
2884 }
2885}
2886
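/*
 * Illustrative sketch only (#if 0): how the ONFI-style status byte returned
 * by mtk_nand_read_status() above is decoded. Bit 6 is ready/busy, bit 0 is
 * pass/fail of the last program or erase, and bit 7 is cleared when the
 * device is write-protected.
 */
#if 0
static void example_decode_status(int status)
{
    printk(KERN_INFO "ready=%d fail=%d write-allowed=%d\n",
           !!(status & STATUS_READY),
           !!(status & STATUS_FAIL),
           !!(status & STATUS_WR_ALLOW));
}
#endif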
2887bool mtk_nand_SetFeature(struct mtd_info *mtd, u16 cmd, u32 addr, u8 *value, u8 bytes)
2888{
2889 u16 reg_val = 0;
2890 u8 write_count = 0;
2891 u32 reg = 0;
2892 u32 timeout=TIMEOUT_3;//0xffff;
2893// u32 status;
2894// struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2895
2896 mtk_nand_reset();
2897
2898 reg = DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32);
2899 if (!(reg&TYPE_SLC))
2900 bytes <<= 1;
2901
2902 reg_val |= (CNFG_OP_CUST | CNFG_BYTE_RW);
2903 DRV_WriteReg(NFI_CNFG_REG16, reg_val);
2904
2905 mtk_nand_set_command(cmd);
2906 mtk_nand_set_address(addr, 0, 1, 0);
2907
2908 mtk_nand_status_ready(STA_NFI_OP_MASK);
2909
2910 DRV_WriteReg32(NFI_CON_REG16, 1 << CON_NFI_SEC_SHIFT);
2911 NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
2912 DRV_WriteReg(NFI_STRDATA_REG16, 0x1);
2913 //printk("Bytes=%d\n", bytes);
2914 while ( (write_count < bytes) && timeout )
2915 {
2916 WAIT_NFI_PIO_READY(timeout)
2917 if(timeout == 0)
2918 {
2919 break;
2920 }
2921 if (reg&TYPE_SLC)
2922 {
2923 //printk("VALUE1:0x%2X\n", *value);
2924 DRV_WriteReg8(NFI_DATAW_REG32, *value++);
2925 }else if(write_count % 2)
2926 {
2927 //printk("VALUE2:0x%2X\n", *value);
2928 DRV_WriteReg8(NFI_DATAW_REG32, *value++);
2929 }
2930 else
2931 {
2932 //printk("VALUE3:0x%2X\n", *value);
2933 DRV_WriteReg8(NFI_DATAW_REG32, *value);
2934 }
2935 write_count++;
2936 timeout = TIMEOUT_3;
2937 }
2938 *NFI_CNRNB_REG16 = 0x81;
2939 if (!mtk_nand_status_ready(STA_NAND_BUSY_RETURN))
2940 {
2941 return FALSE;
2942 }
2943
2944 //mtk_nand_read_status();
2945 //if(status& 0x1)
2946 // return FALSE;
2947 return TRUE;
2948}
2949
2950bool mtk_nand_GetFeature(struct mtd_info *mtd, u16 cmd, u32 addr, u8 *value, u8 bytes)
2951{
2952 u16 reg_val = 0;
2953 u8 read_count = 0;
2954 u32 timeout=TIMEOUT_3;//0xffff;
2955// struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2956
2957 mtk_nand_reset();
2958
2959 reg_val |= (CNFG_OP_CUST | CNFG_BYTE_RW | CNFG_READ_EN);
2960 DRV_WriteReg(NFI_CNFG_REG16, reg_val);
2961
2962 mtk_nand_set_command(cmd);
2963 mtk_nand_set_address(addr, 0, 1, 0);
2964 mtk_nand_status_ready(STA_NFI_OP_MASK);
2965 *NFI_CNRNB_REG16 = 0x81;
2966 mtk_nand_status_ready(STA_NAND_BUSY_RETURN);
2967
2968 //DRV_WriteReg32(NFI_CON_REG16, 0 << CON_NFI_SEC_SHIFT);
2969 reg_val = DRV_Reg32(NFI_CON_REG16);
2970 reg_val &= ~CON_NFI_NOB_MASK;
2971 reg_val |= ((4 << CON_NFI_NOB_SHIFT)|CON_NFI_SRD);
2972 DRV_WriteReg32(NFI_CON_REG16, reg_val);
2973 DRV_WriteReg(NFI_STRDATA_REG16, 0x1);
2974 //bytes = 20;
2975 while ( (read_count < bytes) && timeout )
2976 {
2977 WAIT_NFI_PIO_READY(timeout)
2978 if(timeout == 0)
2979 {
2980 break;
2981 }
2982 *value++ = DRV_Reg8(NFI_DATAR_REG32);
2983 //printk("Value[0x%02X]\n", DRV_Reg8(NFI_DATAR_REG32));
2984 read_count++;
2985 timeout = TIMEOUT_3;
2986 }
2987// chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
2988 //mtk_nand_read_status();
2989 if(timeout != 0)
2990 return TRUE;
2991 else
2992 return FALSE;
2993
2994}
2995
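/*
 * Illustrative sketch only (#if 0): SET/GET FEATURES is what the Micron-style
 * read retry below relies on - write the retry level to the feature address,
 * optionally read it back to confirm. The gfeatureCmd field used here is an
 * assumption about flashdev_info_t and may need to be adapted; sfeatureCmd
 * and readRetryAddress are taken from the code in this file.
 */
#if 0
static bool example_apply_retry_level(struct mtd_info *mtd, u32 level)
{
    u32 readback = 0;

    if (!mtk_nand_SetFeature(mtd, devinfo.feature_set.FeatureSet.sfeatureCmd,
                             devinfo.feature_set.FeatureSet.readRetryAddress,
                             (u8 *)&level, 4))
        return false;

    /* gfeatureCmd is assumed, not confirmed by this file */
    return mtk_nand_GetFeature(mtd, devinfo.feature_set.FeatureSet.gfeatureCmd,
                               devinfo.feature_set.FeatureSet.readRetryAddress,
                               (u8 *)&readback, 4);
}
#endif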
2996#if 1
2997const u8 data_tbl[8][5] =
2998{
2999 {0x04, 0x04, 0x7C, 0x7E, 0x00},
3000 {0x00, 0x7C, 0x78, 0x78, 0x00},
3001 {0x7C, 0x76, 0x74, 0x72, 0x00},
3002 {0x08, 0x08, 0x00, 0x00, 0x00},
3003 {0x0B, 0x7E, 0x76, 0x74, 0x00},
3004 {0x10, 0x76, 0x72, 0x70, 0x00},
3005 {0x02, 0x7C, 0x7E, 0x70, 0x00},
3006 {0x00, 0x00, 0x00, 0x00, 0x00}
3007};
3008
3009static void mtk_nand_modeentry_rrtry(void)
3010{
3011 mtk_nand_reset();
3012
3013 mtk_nand_set_mode(CNFG_OP_CUST);
3014
3015 mtk_nand_set_command(0x5C);
3016 mtk_nand_set_command(0xC5);
3017
3018 mtk_nand_status_ready(STA_NFI_OP_MASK);
3019}
3020
3021static void mtk_nand_rren_rrtry(bool needB3)
3022{
3023 mtk_nand_reset();
3024
3025 mtk_nand_set_mode(CNFG_OP_CUST);
3026
3027 if(needB3)
3028 mtk_nand_set_command(0xB3);
3029 mtk_nand_set_command(0x26);
3030 mtk_nand_set_command(0x5D);
3031
3032 mtk_nand_status_ready(STA_NFI_OP_MASK);
3033}
3034
3035static void mtk_nand_sprmset_rrtry(u32 addr, u32 data) //single parameter setting
3036{
3037 u16 reg_val = 0;
3038 u8 write_count = 0;
3039 u32 reg = 0;
3040 u32 timeout=TIMEOUT_3;//0xffff;
3041
3042 mtk_nand_reset();
3043
3044 reg_val |= (CNFG_OP_CUST | CNFG_BYTE_RW);
3045 DRV_WriteReg(NFI_CNFG_REG16, reg_val);
3046 mtk_nand_set_command(0x55);
3047 mtk_nand_set_address(addr, 0, 1, 0);
3048 mtk_nand_status_ready(STA_NFI_OP_MASK);
3049 DRV_WriteReg32(NFI_CON_REG16, 1 << CON_NFI_SEC_SHIFT);
3050 NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
3051 DRV_WriteReg(NFI_STRDATA_REG16, 0x1);
3052 WAIT_NFI_PIO_READY(timeout);
3053 timeout=TIMEOUT_3;
3054 DRV_WriteReg8(NFI_DATAW_REG32, data);
3055
3056 while(!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN) && (timeout--));
3057}
3058
3059static void mtk_nand_toshiba_rrtry(struct mtd_info *mtd,flashdev_info_t deviceinfo, u32 retryCount, bool defValue)
3060{
3061 u32 acccon;
3062 u8 cnt = 0;
3063 u8 add_reg[6] = {0x04, 0x05, 0x06, 0x07, 0x0D};
3064
3065 acccon = DRV_Reg32(NFI_ACCCON_REG32);
3066 DRV_WriteReg32(NFI_ACCCON_REG32, 0x31C08669); //to fit read retry timing
3067
3068 if(0 == retryCount)
3069 mtk_nand_modeentry_rrtry();
3070
3071 for(cnt = 0; cnt < 5; cnt ++)
3072 {
3073 mtk_nand_sprmset_rrtry(add_reg[cnt], data_tbl[retryCount][cnt]);
3074 }
3075
3076 if(3 == retryCount)
3077 mtk_nand_rren_rrtry(TRUE);
3078 else if(6 > retryCount)
3079 mtk_nand_rren_rrtry(FALSE);
3080
3081 if(7 == retryCount) // to exit
3082 {
3083 mtk_nand_device_reset();
3084 mtk_nand_reset();
3085 //should do NAND DEVICE interface change under sync mode
3086 }
3087
3088 DRV_WriteReg32(NFI_ACCCON_REG32, acccon);
3089}
3090
3091#endif
3092static void mtk_nand_micron_rrtry(struct mtd_info *mtd,flashdev_info_t deviceinfo, u32 feature, bool defValue)
3093{
3094 //u32 feature = deviceinfo.feature_set.FeatureSet.readRetryStart+retryCount;
3095 mtk_nand_SetFeature(mtd, deviceinfo.feature_set.FeatureSet.sfeatureCmd,\
3096 deviceinfo.feature_set.FeatureSet.readRetryAddress,\
3097 (u8 *)&feature,4);
3098}
3099
3100static int g_sandisk_retry_case = 0; //for new read retry table case 1,2,3,4
3101static void mtk_nand_sandisk_rrtry(struct mtd_info *mtd,flashdev_info_t deviceinfo, u32 feature, bool defValue)
3102{
3103 //u32 feature = deviceinfo.feature_set.FeatureSet.readRetryStart+retryCount;
3104 if(FALSE == defValue)
3105 {
3106 mtk_nand_reset();
3107 }
3108 else
3109 {
3110 mtk_nand_device_reset();
3111 mtk_nand_reset();
3112 //should do NAND DEVICE interface change under sync mode
3113 }
3114
3115 mtk_nand_SetFeature(mtd, deviceinfo.feature_set.FeatureSet.sfeatureCmd,\
3116 deviceinfo.feature_set.FeatureSet.readRetryAddress,\
3117 (u8 *)&feature,4);
3118 if(FALSE == defValue)
3119 {
3120 if(g_sandisk_retry_case > 1) //case 3
3121 {
3122 if(g_sandisk_retry_case == 3)
3123 {
3124 u32 timeout=TIMEOUT_3;
3125 mtk_nand_reset();
3126 DRV_WriteReg(NFI_CNFG_REG16, (CNFG_OP_CUST | CNFG_BYTE_RW));
3127 mtk_nand_set_command(0x5C);
3128 mtk_nand_set_command(0xC5);
3129 mtk_nand_set_command(0x55);
3130 mtk_nand_set_address(0x00, 0, 1, 0); // test mode entry
3131 mtk_nand_status_ready(STA_NFI_OP_MASK);
3132 DRV_WriteReg32(NFI_CON_REG16, 1 << CON_NFI_SEC_SHIFT);
3133 NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
3134 DRV_WriteReg(NFI_STRDATA_REG16, 0x1);
3135 WAIT_NFI_PIO_READY(timeout);
3136 DRV_WriteReg8(NFI_DATAW_REG32, 0x01);
3137 while(!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN) && (timeout--));
3138 mtk_nand_reset();
3139 timeout=TIMEOUT_3;
3140 mtk_nand_set_command(0x55);
3141 mtk_nand_set_address(0x23, 0, 1, 0); //changing parameter LMFLGFIX_NEXT = 1 to all die
3142 mtk_nand_status_ready(STA_NFI_OP_MASK);
3143 DRV_WriteReg32(NFI_CON_REG16, 1 << CON_NFI_SEC_SHIFT);
3144 NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
3145 DRV_WriteReg(NFI_STRDATA_REG16, 0x1);
3146 WAIT_NFI_PIO_READY(timeout);
3147 DRV_WriteReg8(NFI_DATAW_REG32, 0xC0);
3148 while(!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN) && (timeout--));
3149 mtk_nand_reset();
3150 printk("Case3# Set LMFLGFIX_NEXT=1\n");
3151 }
3152 mtk_nand_set_command(0x25);
3153 printk("Case2#3# Set cmd 25\n");
3154 }
3155 mtk_nand_set_command(deviceinfo.feature_set.FeatureSet.readRetryPreCmd);
3156 }
3157}
3158
3159//sandisk 19nm read retry
3160u16 sandisk_19nm_rr_table[18] =
3161{
3162 0x0000,
3163 0xFF0F, 0xEEFE, 0xDDFD, 0x11EE, //04h[7:4] | 07h[7:4] | 04h[3:0] | 05h[7:4]
3164 0x22ED, 0x33DF, 0xCDDE, 0x01DD,
3165 0x0211, 0x1222, 0xBD21, 0xAD32,
3166 0x9DF0, 0xBCEF, 0xACDC, 0x9CFF,
3167 0x0000 //align
3168};
3169
3170static void sandisk_19nm_rr_init(void)
3171{
3172 u32 reg_val = 0;
3173 u32 count = 0;
3174 u32 timeout = 0xffff;
3175 u32 u4RandomSetting;
3176 u32 acccon;
3177
3178 acccon = DRV_Reg32(NFI_ACCCON_REG32);
3179 DRV_WriteReg32(NFI_ACCCON_REG32, 0x31C08669); //to fit read retry timing
3180
3181 mtk_nand_reset();
3182
3183 reg_val = (CNFG_OP_CUST | CNFG_BYTE_RW);
3184 DRV_WriteReg(NFI_CNFG_REG16, reg_val);
3185 mtk_nand_set_command(0x3B);
3186 mtk_nand_set_command(0xB9);
3187
3188 for(count = 0; count < 9; count++)
3189 {
3190 mtk_nand_set_command(0x53);
3191 mtk_nand_set_address((0x04 + count), 0, 1, 0);
3192 DRV_WriteReg(NFI_CON_REG16, (CON_NFI_BWR | (1 << CON_NFI_SEC_SHIFT)));
3193 DRV_WriteReg(NFI_STRDATA_REG16, 1);
3194 timeout = 0xffff;
3195 WAIT_NFI_PIO_READY(timeout);
3196 DRV_WriteReg32(NFI_DATAW_REG32, 0x00);
3197 mtk_nand_reset();
3198 }
3199
3200 DRV_WriteReg32(NFI_ACCCON_REG32, acccon);
3201}
3202
3203static void sandisk_19nm_rr_loading(u32 retryCount, bool defValue)
3204{
3205 u32 reg_val = 0;
3206 u32 timeout = 0xffff;
3207 u32 acccon;
3208 u8 count;
3209 u8 cmd_reg[4] = {0x4, 0x5, 0x7};
3210 acccon = DRV_Reg32(NFI_ACCCON_REG32);
3211 DRV_WriteReg32(NFI_ACCCON_REG32, 0x31C08669); //to fit read retry timing
3212
3213 mtk_nand_reset();
3214
3215 reg_val = (CNFG_OP_CUST | CNFG_BYTE_RW);
3216 DRV_WriteReg(NFI_CNFG_REG16, reg_val);
3217
3218 if((0 != retryCount) || defValue)
3219 {
3220 mtk_nand_set_command(0xD6);
3221 }
3222
3223 mtk_nand_set_command(0x3B);
3224 mtk_nand_set_command(0xB9);
3225 for(count = 0; count < 3; count++)
3226 {
3227 mtk_nand_set_command(0x53);
3228 mtk_nand_set_address(cmd_reg[count], 0, 1, 0);
3229 DRV_WriteReg(NFI_CON_REG16, (CON_NFI_BWR | (1 << CON_NFI_SEC_SHIFT)));
3230 DRV_WriteReg(NFI_STRDATA_REG16, 1);
3231 timeout = 0xffff;
3232 WAIT_NFI_PIO_READY(timeout);
3233 if(count == 0)
3234 DRV_WriteReg32(NFI_DATAW_REG32, (((sandisk_19nm_rr_table[retryCount] & 0xF000) >> 8) | ((sandisk_19nm_rr_table[retryCount] & 0x00F0) >> 4)));
3235 else if(count == 1)
3236 DRV_WriteReg32(NFI_DATAW_REG32, ((sandisk_19nm_rr_table[retryCount] & 0x000F) << 4));
3237 else if(count == 2)
3238 DRV_WriteReg32(NFI_DATAW_REG32, ((sandisk_19nm_rr_table[retryCount] & 0x0F00) >> 4));
3239
3240 mtk_nand_reset();
3241 }
3242
3243 if(!defValue)
3244 {
3245 mtk_nand_set_command(0xB6);
3246 }
3247
3248 DRV_WriteReg32(NFI_ACCCON_REG32, acccon);
3249}
3250
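/*
 * Illustrative sketch only (#if 0): each 16-bit entry of sandisk_19nm_rr_table
 * packs four nibbles, documented above as "04h[7:4] | 07h[7:4] | 04h[3:0] |
 * 05h[7:4]". sandisk_19nm_rr_loading() unpacks them exactly as shown here
 * before writing registers 0x04, 0x05 and 0x07.
 */
#if 0
static void example_unpack_sandisk_entry(u16 entry, u8 *reg04, u8 *reg05,
                                         u8 *reg07)
{
    *reg04 = ((entry & 0xF000) >> 8) | ((entry & 0x00F0) >> 4); /* count == 0 */
    *reg05 = (entry & 0x000F) << 4;                             /* count == 1 */
    *reg07 = (entry & 0x0F00) >> 4;                             /* count == 2 */
}
#endif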
3251static void mtk_nand_sandisk_19nm_rrtry(struct mtd_info *mtd,flashdev_info_t deviceinfo, u32 retryCount, bool defValue)
3252{
3253 if((retryCount == 0) && (!defValue))
3254 sandisk_19nm_rr_init();
3255 sandisk_19nm_rr_loading(retryCount, defValue);
3256}
3257
3258#define HYNIX_RR_TABLE_SIZE (1026) //hynix read retry table size
3259#define SINGLE_RR_TABLE_SIZE (64)
3260
3261#define READ_RETRY_STEP (devinfo.feature_set.FeatureSet.readRetryCnt + devinfo.feature_set.FeatureSet.readRetryStart) // 8 step or 12 step to fix read retry table
3262#define HYNIX_16NM_RR_TABLE_SIZE ((READ_RETRY_STEP == 12)?(784):(528)) //hynix read retry table size
3263#define SINGLE_RR_TABLE_16NM_SIZE ((READ_RETRY_STEP == 12)?(48):(32))
3264
3265u8 nand_hynix_rr_table[(HYNIX_RR_TABLE_SIZE+16)/16*16]; //align as 16 byte
3266
3267#define NAND_HYX_RR_TBL_BUF nand_hynix_rr_table
3268
3269static u8 real_hynix_rr_table_idx = 0;
3270static u32 g_hynix_retry_count = 0;
3271
3272static bool hynix_rr_table_select(u8 table_index, flashdev_info_t *deviceinfo)
3273{
3274 u32 i;
3275 u32 table_size = (deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)?SINGLE_RR_TABLE_16NM_SIZE : SINGLE_RR_TABLE_SIZE;
3276
3277 for(i = 0; i < table_size; i++)
3278 {
3279 u8 *temp_rr_table = (u8 *)NAND_HYX_RR_TBL_BUF+table_size*table_index*2+2;
3280 u8 *temp_inversed_rr_table = (u8 *)NAND_HYX_RR_TBL_BUF+table_size*table_index*2+table_size+2;
3281 if(deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)
3282 {
3283 temp_rr_table += 14;
3284 temp_inversed_rr_table += 14;
3285 }
3286 if(0xFF != (temp_rr_table[i] ^ temp_inversed_rr_table[i]))
3287 return FALSE; // error table
3288 }
3289// print table
3290 if(deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)
3291 table_size += 16;
3292 else
3293 table_size += 2;
3294 for(i = 0; i < table_size; i++)
3295 {
3296 printk("%02X ", NAND_HYX_RR_TBL_BUF[i]);
3297 if((i + 1)%8 == 0)
3298 printk("\n");
3299 }
3300 return TRUE; // correct table
3301}
3302
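/*
 * Illustrative sketch only (#if 0): the Hynix OTP area stores every read
 * retry set twice, once plain and once bit-inverted. A set is accepted only
 * if each byte XORed with its inverted copy yields 0xFF, which is the check
 * hynix_rr_table_select() above performs for every table index.
 */
#if 0
static bool example_check_inverted_copy(const u8 *plain, const u8 *inverted,
                                        u32 len)
{
    u32 i;

    for (i = 0; i < len; i++) {
        if ((plain[i] ^ inverted[i]) != 0xFF)
            return false;
    }
    return true;
}
#endif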
3303static void HYNIX_RR_TABLE_READ(flashdev_info_t *deviceinfo)
3304{
3305 u32 reg_val = 0;
3306 u32 read_count = 0, max_count = HYNIX_RR_TABLE_SIZE;
3307 u32 timeout = 0xffff;
3308 u8* rr_table = (u8*)(NAND_HYX_RR_TBL_BUF);
3309 u8 table_index = 0;
3310 u8 add_reg1[3] = {0xFF, 0xCC};
3311 u8 data_reg1[3] = {0x40, 0x4D};
3312 u8 cmd_reg[6] = {0x16, 0x17, 0x04, 0x19, 0x00};
3313 u8 add_reg2[6] = {0x00, 0x00, 0x00, 0x02, 0x00};
3314 bool RR_TABLE_EXIST = TRUE;
3315 if(deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)
3316 {
3317 read_count = 1;
3318 add_reg1[1]= 0x38;
3319 data_reg1[1] = 0x52;
3320 max_count = HYNIX_16NM_RR_TABLE_SIZE;
3321 if(READ_RETRY_STEP == 12)
3322 {
3323 add_reg2[2] = 0x1F;
3324 }
3325 }
3326 mtk_nand_device_reset();
3327    // take care under sync mode: the NAND device interface needs to be changed (xiaolei)
3328
3329 mtk_nand_reset();
3330
3331 DRV_WriteReg(NFI_CNFG_REG16, (CNFG_OP_CUST | CNFG_BYTE_RW));
3332
3333 mtk_nand_set_command(0x36);
3334
3335 for(; read_count < 2; read_count++)
3336 {
3337 mtk_nand_set_address(add_reg1[read_count],0,1,0);
3338 DRV_WriteReg(NFI_CON_REG16, (CON_NFI_BWR | (1 << CON_NFI_SEC_SHIFT)));
3339 DRV_WriteReg(NFI_STRDATA_REG16, 1);
3340 timeout = 0xffff;
3341 WAIT_NFI_PIO_READY(timeout);
3342 DRV_WriteReg32(NFI_DATAW_REG32, data_reg1[read_count]);
3343 mtk_nand_reset();
3344 }
3345
3346 for(read_count = 0; read_count < 5; read_count++)
3347 {
3348 mtk_nand_set_command(cmd_reg[read_count]);
3349 }
3350 for(read_count = 0; read_count < 5; read_count++)
3351 {
3352 mtk_nand_set_address(add_reg2[read_count],0,1,0);
3353 }
3354 mtk_nand_set_command(0x30);
3355 DRV_WriteReg(NFI_CNRNB_REG16, 0xF1);
3356 timeout = 0xffff;
3357 while(!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN) && (timeout--));
3358
3359 reg_val = (CNFG_OP_CUST | CNFG_BYTE_RW | CNFG_READ_EN);
3360 DRV_WriteReg(NFI_CNFG_REG16, reg_val);
3361 DRV_WriteReg(NFI_CON_REG16, (CON_NFI_BRD | (2<< CON_NFI_SEC_SHIFT)));
3362 DRV_WriteReg(NFI_STRDATA_REG16, 0x1);
3363 timeout = 0xffff;
3364    read_count = 0; // restart the counter before reading back the table body
3365 while ((read_count < max_count) && timeout )
3366 {
3367 WAIT_NFI_PIO_READY(timeout);
3368 *rr_table++ = (U8)DRV_Reg32(NFI_DATAR_REG32);
3369 read_count++;
3370 timeout = 0xFFFF;
3371 }
3372
3373 mtk_nand_device_reset();
3374    // take care under sync mode: the NAND device interface needs to be changed (xiaolei)
3375
3376 reg_val = (CNFG_OP_CUST | CNFG_BYTE_RW);
3377 if(deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)
3378 {
3379 DRV_WriteReg(NFI_CNFG_REG16, reg_val);
3380 mtk_nand_set_command(0x36);
3381 mtk_nand_set_address(0x38,0,1,0);
3382 DRV_WriteReg(NFI_CON_REG16, (CON_NFI_BWR | (1 << CON_NFI_SEC_SHIFT)));
3383 DRV_WriteReg(NFI_STRDATA_REG16, 1);
3384 WAIT_NFI_PIO_READY(timeout);
3385 DRV_WriteReg32(NFI_DATAW_REG32, 0x00);
3386 mtk_nand_reset();
3387 mtk_nand_set_command(0x16);
3388 mtk_nand_set_command(0x00);
3389 mtk_nand_set_address(0x00,0,1,0);//dummy read, add don't care
3390 mtk_nand_set_command(0x30);
3391 }else
3392 {
3393 DRV_WriteReg(NFI_CNFG_REG16, reg_val);
3394 mtk_nand_set_command(0x38);
3395 }
3396 timeout = 0xffff;
3397 while(!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN) && (timeout--));
3398 rr_table = (u8*)(NAND_HYX_RR_TBL_BUF);
3399 if(deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX)
3400 {
3401 if((rr_table[0] != 8) || (rr_table[1] != 8))
3402 {
3403 RR_TABLE_EXIST = FALSE;
3404 ASSERT(0);
3405 }
3406 }
3407 else if(deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)
3408 {
3409 for(read_count=0;read_count<8;read_count++)
3410 {
3411 if((rr_table[read_count] != 8) || (rr_table[read_count+8] != 4))
3412 {
3413 RR_TABLE_EXIST = FALSE;
3414 break;
3415 }
3416 }
3417 }
3418 if(RR_TABLE_EXIST)
3419 {
3420 for(table_index = 0 ;table_index < 8; table_index++)
3421 {
3422 if(hynix_rr_table_select(table_index, deviceinfo))
3423 {
3424 real_hynix_rr_table_idx = table_index;
3425 MSG(INIT, "Hynix rr_tbl_id %d\n",real_hynix_rr_table_idx);
3426 break;
3427 }
3428 }
3429 if(table_index == 8)
3430 {
3431 ASSERT(0);
3432 }
3433 }
3434 else
3435 {
3436 MSG(INIT, "Hynix RR table index error!\n");
3437 }
3438}
3439
3440static void HYNIX_Set_RR_Para(u32 rr_index, flashdev_info_t *deviceinfo)
3441{
3442 u32 reg_val = 0;
3443 u32 timeout=0xffff;
3444 u8 count, max_count = 8;
3445 u8 add_reg[9] = {0xCC, 0xBF, 0xAA, 0xAB, 0xCD, 0xAD, 0xAE, 0xAF};
3446 u8 *hynix_rr_table = (u8 *)NAND_HYX_RR_TBL_BUF+SINGLE_RR_TABLE_SIZE*real_hynix_rr_table_idx*2+2;
3447 if(deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)
3448 {
3449 add_reg[0] = 0x38; //0x38, 0x39, 0x3A, 0x3B
3450 for(count =1; count < 4; count++)
3451 {
3452 add_reg[count] = add_reg[0] + count;
3453 }
3454 hynix_rr_table += 14;
3455 max_count = 4;
3456 }
3457 mtk_nand_reset();
3458
3459 DRV_WriteReg(NFI_CNFG_REG16, (CNFG_OP_CUST | CNFG_BYTE_RW));
3460 //mtk_nand_set_command(0x36);
3461
3462 for(count = 0; count < max_count; count++)
3463 {
3464 mtk_nand_set_command(0x36);
3465 mtk_nand_set_address(add_reg[count], 0, 1, 0);
3466 DRV_WriteReg(NFI_CON_REG16, (CON_NFI_BWR | (1 << CON_NFI_SEC_SHIFT)));
3467 DRV_WriteReg(NFI_STRDATA_REG16, 1);
3468 timeout = 0xffff;
3469 WAIT_NFI_PIO_READY(timeout);
3470 if(timeout == 0)
3471 {
3472 printk("HYNIX_Set_RR_Para timeout\n");
3473 break;
3474 }
3475 DRV_WriteReg32(NFI_DATAW_REG32, hynix_rr_table[rr_index*max_count + count]);
3476 mtk_nand_reset();
3477 }
3478 mtk_nand_set_command(0x16);
3479}
3480
3481static void HYNIX_Get_RR_Para(u32 rr_index, flashdev_info_t *deviceinfo)
3482{
3483 u32 reg_val = 0;
3484 u32 timeout=0xffff;
3485 u8 count, max_count = 8;
3486 u8 add_reg[9] = {0xCC, 0xBF, 0xAA, 0xAB, 0xCD, 0xAD, 0xAE, 0xAF};
3487 u8 *hynix_rr_table = (u8 *)NAND_HYX_RR_TBL_BUF+SINGLE_RR_TABLE_SIZE*real_hynix_rr_table_idx*2+2;
3488 if(deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)
3489 {
3490 add_reg[0] = 0x38; //0x38, 0x39, 0x3A, 0x3B
3491 for(count =1; count < 4; count++)
3492 {
3493 add_reg[count] = add_reg[0] + count;
3494 }
3495 hynix_rr_table += 14;
3496 max_count = 4;
3497 }
3498 mtk_nand_reset();
3499
3500 DRV_WriteReg(NFI_CNFG_REG16, (CNFG_OP_CUST | CNFG_BYTE_RW | CNFG_READ_EN));
3501 //mtk_nand_set_command(0x37);
3502
3503 for(count = 0; count < max_count; count++)
3504 {
3505 mtk_nand_set_command(0x37);
3506 mtk_nand_set_address(add_reg[count], 0, 1, 0);
3507
3508 DRV_WriteReg(NFI_CON_REG16, (CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT)));
3509 DRV_WriteReg(NFI_STRDATA_REG16, 1);
3510
3511 timeout = 0xffff;
3512 WAIT_NFI_PIO_READY(timeout);
3513 if(timeout == 0)
3514 {
3515 printk("HYNIX_Get_RR_Para timeout\n");
3516 }
3517 //DRV_WriteReg32(NFI_DATAW_REG32, hynix_rr_table[rr_index*max_count + count]);
3518 printk("Get[%02X]%02X\n",add_reg[count], DRV_Reg8(NFI_DATAR_REG32));
3519 mtk_nand_reset();
3520 }
3521}
3522
3523static void mtk_nand_hynix_rrtry(struct mtd_info *mtd, flashdev_info_t deviceinfo, u32 retryCount, bool defValue)
3524{
3525 if(defValue == FALSE)
3526 {
3527 if(g_hynix_retry_count == READ_RETRY_STEP)
3528 {
3529 g_hynix_retry_count = 0;
3530 }
3531 printk("Hynix Retry %d\n", g_hynix_retry_count);
3532 HYNIX_Set_RR_Para(g_hynix_retry_count, &deviceinfo);
3533 //HYNIX_Get_RR_Para(g_hynix_retry_count, &deviceinfo);
3534 g_hynix_retry_count ++;
3535 }
3536}
3537
3538static void mtk_nand_hynix_16nm_rrtry(struct mtd_info *mtd, flashdev_info_t deviceinfo, u32 retryCount, bool defValue)
3539{
3540 if(defValue == FALSE)
3541 {
3542 if(g_hynix_retry_count == READ_RETRY_STEP)
3543 {
3544 g_hynix_retry_count = 0;
3545 }
3546 printk("Hynix 16nm Retry %d\n", g_hynix_retry_count);
3547 HYNIX_Set_RR_Para(g_hynix_retry_count, &deviceinfo);
3548 //mb();
3549 //HYNIX_Get_RR_Para(g_hynix_retry_count, &deviceinfo);
3550 g_hynix_retry_count ++;
3551
3552 }
3553}
3554
3555// sandisk 1y nm
3556u32 special_rrtry_setting[36]=
3557{
35580x00000000,0x7C00007C,0x787C0004,0x74780078,
35590x7C007C08,0x787C7C00,0x74787C7C,0x70747C00,
35600x7C007800,0x787C7800,0x74787800,0x70747800,
35610x6C707800,0x00040400,0x7C000400,0x787C040C,
35620x7478040C,0x7C000810,0x00040810,0x04040C0C,
35630x00040C10,0x00081014,0x000C1418,0x7C040C0C,
35640x74787478,0x70747478,0x6C707478,0x686C7478,
35650x74787078,0x70747078,0x686C7078,0x6C707078,
35660x6C706C78,0x686C6C78,0x64686C78,0x686C6874,
35670x64686874,
3568};
3569
3570static u32 mtk_nand_rrtry_setting(flashdev_info_t deviceinfo, enum readRetryType type, u32 retryStart, u32 loopNo)
3571{
3572 u32 value;
3573 //if(RTYPE_MICRON == type || RTYPE_SANDISK== type || RTYPE_TOSHIBA== type || RTYPE_HYNIX== type)
3574 {
3575 if(retryStart != 0xFFFFFFFF)
3576 {
3577 value = retryStart+loopNo;
3578 }
3579 else
3580 {
3581 value = special_rrtry_setting[loopNo];
3582 }
3583 }
3584
3585 return value;
3586}
3587
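/*
 * Illustrative sketch only (#if 0): the retry "feature" value for attempt N
 * is either linear (readRetryStart + N) or, when readRetryStart is
 * 0xFFFFFFFF, an entry of the SanDisk 1y nm table special_rrtry_setting[]
 * above; mtk_nand_rrtry_setting() encodes exactly this choice. The caller is
 * expected to keep N below readRetryCnt / the table size.
 */
#if 0
static u32 example_feature_for_attempt(u32 attempt)
{
    if (devinfo.feature_set.FeatureSet.readRetryStart != 0xFFFFFFFF)
        return devinfo.feature_set.FeatureSet.readRetryStart + attempt;
    return special_rrtry_setting[attempt];
}
#endif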
3588typedef void (*rrtryFunctionType)(struct mtd_info *mtd, flashdev_info_t deviceinfo, u32 feature, bool defValue);
3589
3590static rrtryFunctionType rtyFuncArray[]=
3591{
3592 mtk_nand_micron_rrtry,
3593 mtk_nand_sandisk_rrtry,
3594 mtk_nand_sandisk_19nm_rrtry,
3595 mtk_nand_toshiba_rrtry,
3596 mtk_nand_hynix_rrtry,
3597 mtk_nand_hynix_16nm_rrtry
3598};
3599
3600
3601static void mtk_nand_rrtry_func(struct mtd_info *mtd,flashdev_info_t deviceinfo, u32 feature, bool defValue)
3602{
3603 rtyFuncArray[deviceinfo.feature_set.FeatureSet.rtype](mtd,deviceinfo, feature,defValue);
3604}
3605
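/*
 * Illustrative sketch only (#if 0): rtyFuncArray[] is indexed by
 * devinfo.feature_set.FeatureSet.rtype, so the readRetryType enum order must
 * match the array order above (Micron, SanDisk, SanDisk 19nm, Toshiba, Hynix,
 * Hynix 16nm). One retry attempt then boils down to the two calls below.
 */
#if 0
static void example_one_retry_attempt(struct mtd_info *mtd, u32 attempt)
{
    u32 feature = mtk_nand_rrtry_setting(devinfo,
                        devinfo.feature_set.FeatureSet.rtype,
                        devinfo.feature_set.FeatureSet.readRetryStart,
                        attempt);

    mtk_nand_rrtry_func(mtd, devinfo, feature, FALSE);
}
#endif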
3606/******************************************************************************
3607 * mtk_nand_exec_read_page
3608 *
3609 * DESCRIPTION:
3610 * Read a page data !
3611 *
3612 * PARAMETERS:
3613 * struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize,
3614 * u8* pPageBuf, u8* pFDMBuf
3615 *
3616 * RETURNS:
3617 *   int : ERR_RTN_SUCCESS, ERR_RTN_FAIL or ERR_RTN_BCH_FAIL
3618 *
3619 * NOTES:
3620 * None
3621 *
3622 ******************************************************************************/
3623int mtk_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
3624{
3625 u8 *buf;
3626 int bRet = ERR_RTN_SUCCESS;
3627 struct nand_chip *nand = mtd->priv;
3628 u32 u4SecNum = u4PageSize >> host->hw->nand_sec_shift;
3629 u32 backup_corrected, backup_failed;
3630 bool readRetry = FALSE;
3631 int retryCount = 0;
3632 u32 val;
3633 u32 tempBitMap, bitMap, i;
3634#ifdef NAND_PFM
3635 struct timeval pfm_time_read;
3636#endif
3637#if 0
3638 unsigned short PageFmt_Reg = 0;
3639 unsigned int NAND_ECC_Enc_Reg = 0;
3640 unsigned int NAND_ECC_Dec_Reg = 0;
3641#endif
3642 //MSG(INIT, "mtk_nand_exec_read_page, host->hw->nand_sec_shift: %d\n", host->hw->nand_sec_shift);
3643 //MSG(INIT, "mtk_nand_exec_read_page,u4RowAddr: 0x%x\n", u4RowAddr);
3644 PFM_BEGIN(pfm_time_read);
3645 tempBitMap = 0;
3646
3647 if (((u32) pPageBuf % 16) && local_buffer_16_align)
3648 {
3649 buf = local_buffer_16_align;
3650 } else
3651 {
3652 if(virt_addr_valid (pPageBuf)==0)
3653 { // It should be allocated by vmalloc
3654 buf = local_buffer_16_align;
3655 }
3656 else
3657 {
3658 buf = pPageBuf;
3659 }
3660 }
3661 backup_corrected = mtd->ecc_stats.corrected;
3662 backup_failed = mtd->ecc_stats.failed;
3663
3664
3665#if CFG_2CS_NAND
3666 if (g_bTricky_CS)
3667 {
3668 u4RowAddr = mtk_nand_cs_on(nand, NFI_TRICKY_CS, u4RowAddr);
3669 }
3670#endif
3671
3672 do{
3673 if(use_randomizer && u4RowAddr >= RAND_START_ADDR)
3674 { mtk_nand_turn_on_randomizer(u4RowAddr, 0, 0);}
3675 else if(pre_randomizer && u4RowAddr < RAND_START_ADDR)
3676 { mtk_nand_turn_on_randomizer(u4RowAddr, 0, 0);}
3677 if (mtk_nand_ready_for_read(nand, u4RowAddr, 0, u4SecNum, true, buf))
3678 {
3679 if (!mtk_nand_read_page_data(mtd, buf, u4PageSize))
3680 {
3681 MSG(INIT, "mtk_nand_read_page_data fail\n");
3682 bRet = ERR_RTN_FAIL;
3683 }
3684
3685 if (!mtk_nand_status_ready(STA_NAND_BUSY))
3686 {
3687 MSG(INIT, "mtk_nand_status_ready fail\n");
3688 bRet = ERR_RTN_FAIL;
3689 }
3690 if (g_bHwEcc)
3691 {
3692 if (!mtk_nand_check_dececc_done(u4SecNum))
3693 {
3694 MSG(INIT, "mtk_nand_check_dececc_done fail\n");
3695 bRet = ERR_RTN_FAIL;
3696 }
3697 }
3698 mtk_nand_read_fdm_data(pFDMBuf, u4SecNum);
3699 if (g_bHwEcc)
3700 {
3701 if (!mtk_nand_check_bch_error(mtd, buf, pFDMBuf,u4SecNum - 1, u4RowAddr, &tempBitMap))
3702 {
3703 if(devinfo.vendor != VEND_NONE){
3704 readRetry = TRUE;
3705 }
3706 MSG(INIT, "mtk_nand_check_bch_error fail, retryCount:%d\n",retryCount);
3707 bRet = ERR_RTN_BCH_FAIL;
3708 }
3709 else
3710 {
3711 if(0 != (DRV_Reg32(NFI_STA_REG32) & STA_READ_EMPTY)) // if empty
3712 {
3713 if(retryCount != 0)
3714 {
3715 MSG(INIT,"NFI read retry read empty page, return as uncorrectable\n");
3716 mtd->ecc_stats.failed+=u4SecNum;
3717 bRet = ERR_RTN_BCH_FAIL;
3718 }
3719 }
3720 }
3721 }
3722 mtk_nand_stop_read();
3723 }
3724 if(use_randomizer && u4RowAddr >= RAND_START_ADDR)
3725 { mtk_nand_turn_off_randomizer();}
3726 else if(pre_randomizer && u4RowAddr < RAND_START_ADDR)
3727 { mtk_nand_turn_off_randomizer();}
3728#if 0
3729 if (bRet == ERR_RTN_BCH_FAIL)
3730 {
3731 tempBitMap -= (tempBitMap&bitMap);
3732 if(tempBitMap != 0)
3733 {
3734 MSG(INIT, "read retry has partial data correct 0x%x\n",tempBitMap);
3735 for(i = 0; i < u4SecNum; i++)
3736 {
3737 if((tempBitMap & (1 << i)) != 0)
3738 {
3739 memcpy((temp_buffer_16_align+(u4SecSize*i)),(buf+(u4SecSize*i)),u4SecSize);
3740 memcpy((temp_buffer_16_align+mtd->writesize+(8*i)),(pFDMBuf+(8*i)),8);
3741 }
3742 }
3743 bitMap |= tempBitMap;
3744 }
3745 if(bitMap == ((1 << u4SecNum) - 1))
3746 {
3747 MSG(INIT, "read retry has reformat the page data correctly @ page 0x%x\n",u4RowAddr);
3748 memcpy(buf,temp_buffer_16_align,mtd->writesize);
3749 memcpy(pFDMBuf,(temp_buffer_16_align+mtd->writesize),8*u4SecNum);
3750 mtd->ecc_stats.corrected++;
3751 mtd->ecc_stats.failed = backup_failed;
3752 bRet = ERR_RTN_SUCCESS;
3753 }
3754 }
3755#endif
3756 if (bRet == ERR_RTN_BCH_FAIL)
3757 {
3758 u32 feature ;
3759 tempBitMap = 0;
3760 //feature= devinfo.feature_set.FeatureSet.readRetryStart+retryCount;
3761 feature = mtk_nand_rrtry_setting(devinfo, devinfo.feature_set.FeatureSet.rtype,devinfo.feature_set.FeatureSet.readRetryStart,retryCount);
3762 if(retryCount < devinfo.feature_set.FeatureSet.readRetryCnt)
3763 {
3764 mtd->ecc_stats.corrected = backup_corrected;
3765 mtd->ecc_stats.failed = backup_failed;
3766 mtk_nand_rrtry_func(mtd,devinfo,feature,FALSE);
3767 retryCount++;
3768 }
3769 else
3770 {
3771 feature = devinfo.feature_set.FeatureSet.readRetryDefault;
3772 // sandisk case 2/3/4
3773 if((devinfo.feature_set.FeatureSet.rtype == RTYPE_SANDISK) && (g_sandisk_retry_case < 3))
3774 {
3775 g_sandisk_retry_case++;
3776 printk("Sandisk read retry case#%d\n", g_sandisk_retry_case);
3777 tempBitMap = 0;
3778 mtd->ecc_stats.corrected = backup_corrected;
3779 mtd->ecc_stats.failed = backup_failed;
3780 mtk_nand_rrtry_func(mtd,devinfo,feature,FALSE);
3781 //if((g_sandisk_retry_case == 0) || (g_sandisk_retry_case == 2))
3782 //{
3783 // mtk_nand_set_command(0x26);
3784 //}
3785 retryCount = 0;
3786 }else
3787 {
3788 mtk_nand_rrtry_func(mtd,devinfo,feature,TRUE);
3789 readRetry = FALSE;
3790 g_sandisk_retry_case = 0;
3791 }
3792 }
3793 if((g_sandisk_retry_case == 1) || (g_sandisk_retry_case == 3))
3794 {
3795 mtk_nand_set_command(0x26);
3796 printk("Case1#3# Set cmd 26\n");
3797 }
3798 }
3799 else
3800 {
3801 if((retryCount != 0) && MLC_DEVICE)
3802 {
3803 u32 feature = devinfo.feature_set.FeatureSet.readRetryDefault;
3804 mtk_nand_rrtry_func(mtd,devinfo,feature,TRUE);
3805 }
3806 readRetry = FALSE;
3807 g_sandisk_retry_case = 0;
3808 }
3809 if(TRUE == readRetry)
3810 bRet = ERR_RTN_SUCCESS;
3811 }while(readRetry);
3812 if(retryCount != 0)
3813 {
3814 u32 feature = devinfo.feature_set.FeatureSet.readRetryDefault;
3815 if(bRet == ERR_RTN_SUCCESS)
3816 {
3817 MSG(INIT, "u4RowAddr:0x%x read retry pass, retrycnt:%d ENUM0:%x,ENUM1:%x,mtd_ecc(A):%x,mtd_ecc(B):%x \n",u4RowAddr,retryCount,DRV_Reg32(ECC_DECENUM1_REG32),DRV_Reg32(ECC_DECENUM0_REG32),mtd->ecc_stats.failed,backup_failed);
3818 mtd->ecc_stats.corrected++;
3819 if((devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM) || (devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX))
3820 {
3821 g_hynix_retry_count--;
3822 }
3823 }
3824 else
3825 {
3826 MSG(INIT, "u4RowAddr:0x%x read retry fail, mtd_ecc(A):%x ,fail, mtd_ecc(B):%x\n",u4RowAddr,mtd->ecc_stats.failed,backup_failed);
3827 }
3828 mtk_nand_rrtry_func(mtd,devinfo,feature,TRUE);
3829 g_sandisk_retry_case = 0;
3830 }
3831
3832 if (buf == local_buffer_16_align)
3833 {
3834 memcpy(pPageBuf, buf, u4PageSize);
3835 }
3836 if(bRet != ERR_RTN_SUCCESS)
3837 {
3838 MSG(INIT,"ECC uncorrectable , fake buffer returned\n");
3839 memset(pPageBuf,0xff,u4PageSize);
3840 memset(pFDMBuf,0xff,u4SecNum*8);
3841 }
3842
3843 PFM_END_R(pfm_time_read, u4PageSize + 32);
3844
3845 return bRet;
3846}
3847
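/*
 * Illustrative sketch only (#if 0): typical caller handling of
 * mtk_nand_exec_read_page(). ERR_RTN_SUCCESS means the data (possibly
 * recovered by read retry) is valid; any other value means the page is
 * uncorrectable and both buffers have been filled with 0xFF above.
 */
#if 0
static int example_read_one_page(struct mtd_info *mtd, u32 page,
                                 u8 *data, u8 *fdm)
{
    int ret = mtk_nand_exec_read_page(mtd, page, mtd->writesize, data, fdm);

    if (ret != ERR_RTN_SUCCESS)
        printk(KERN_ERR "page 0x%x uncorrectable\n", page);
    return ret;
}
#endif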
3848bool mtk_nand_exec_read_sector(struct mtd_info *mtd, u32 u4RowAddr, u32 u4ColAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf, int subpageno)
3849{
3850 u8 *buf;
3851 int bRet = ERR_RTN_SUCCESS;
3852 struct nand_chip *nand = mtd->priv;
3853 u32 u4SecNum = subpageno;
3854 u32 backup_corrected, backup_failed;
3855 bool readRetry = FALSE;
3856 int retryCount = 0;
3857 u32 tempBitMap;
3858#ifdef NAND_PFM
3859 struct timeval pfm_time_read;
3860#endif
3861#if 0
3862 unsigned short PageFmt_Reg = 0;
3863 unsigned int NAND_ECC_Enc_Reg = 0;
3864 unsigned int NAND_ECC_Dec_Reg = 0;
3865#endif
3866 //MSG(INIT, "mtk_nand_exec_read_page, host->hw->nand_sec_shift: %d\n", host->hw->nand_sec_shift);
3867
3868 PFM_BEGIN(pfm_time_read);
3869
3870 if (((u32) pPageBuf % 16) && local_buffer_16_align)
3871 {
3872 buf = local_buffer_16_align;
3873 } else
3874 {
3875 if(virt_addr_valid (pPageBuf)==0)
3876 { // It should be allocated by vmalloc
3877 buf = local_buffer_16_align;
3878 }
3879 else
3880 {
3881 buf = pPageBuf;
3882 }
3883 }
3884 backup_corrected = mtd->ecc_stats.corrected;
3885 backup_failed = mtd->ecc_stats.failed;
3886#if CFG_2CS_NAND
3887 if (g_bTricky_CS)
3888 {
3889 u4RowAddr = mtk_nand_cs_on(nand, NFI_TRICKY_CS, u4RowAddr);
3890 }
3891#endif
3892 do{
3893 if(use_randomizer && u4RowAddr >= RAND_START_ADDR)
3894 { mtk_nand_turn_on_randomizer(u4RowAddr, 0, 0);}
3895 else if(pre_randomizer && u4RowAddr < RAND_START_ADDR)
3896 { mtk_nand_turn_on_randomizer(u4RowAddr, 0, 0);}
3897 if (mtk_nand_ready_for_read(nand, u4RowAddr, u4ColAddr, u4SecNum, true, buf))
3898 {
3899 if (!mtk_nand_read_page_data(mtd, buf, u4PageSize))
3900 {
3901 MSG(INIT, "mtk_nand_read_page_data fail\n");
3902 bRet = ERR_RTN_FAIL;
3903 }
3904
3905 if (!mtk_nand_status_ready(STA_NAND_BUSY))
3906 {
3907 MSG(INIT, "mtk_nand_status_ready fail\n");
3908 bRet = ERR_RTN_FAIL;
3909 }
3910 if (g_bHwEcc)
3911 {
3912 if (!mtk_nand_check_dececc_done(u4SecNum))
3913 {
3914 MSG(INIT, "mtk_nand_check_dececc_done fail\n");
3915 bRet = ERR_RTN_FAIL;
3916 }
3917 }
3918 mtk_nand_read_fdm_data(pFDMBuf, u4SecNum);
3919 if (g_bHwEcc)
3920 {
3921 if (!mtk_nand_check_bch_error(mtd, buf, pFDMBuf,u4SecNum - 1, u4RowAddr, NULL))
3922 {
3923 if(devinfo.vendor != VEND_NONE){
3924 readRetry = TRUE;
3925 }
3926 MSG(INIT, "mtk_nand_check_bch_error fail, retryCount:%d\n",retryCount);
3927 bRet = ERR_RTN_BCH_FAIL;
3928 }
3929 else
3930 {
3931 if(0 != (DRV_Reg32(NFI_STA_REG32) & STA_READ_EMPTY)) // if empty
3932 {
3933 if(retryCount != 0)
3934 {
3935 MSG(INIT,"NFI read retry read empty page, return as uncorrectable\n");
3936 mtd->ecc_stats.failed+=u4SecNum;
3937 bRet = ERR_RTN_BCH_FAIL;
3938 }
3939 }
3940 }
3941 }
3942 mtk_nand_stop_read();
3943 }
3944 if(use_randomizer && u4RowAddr >= RAND_START_ADDR)
3945 { mtk_nand_turn_off_randomizer();}
3946 else if(pre_randomizer && u4RowAddr < RAND_START_ADDR)
3947 { mtk_nand_turn_off_randomizer();}
3948 if (bRet == ERR_RTN_BCH_FAIL)
3949 {
3950 u32 feature = mtk_nand_rrtry_setting(devinfo, devinfo.feature_set.FeatureSet.rtype,devinfo.feature_set.FeatureSet.readRetryStart,retryCount);
3951 if(retryCount < devinfo.feature_set.FeatureSet.readRetryCnt)
3952 {
3953 mtd->ecc_stats.corrected = backup_corrected;
3954 mtd->ecc_stats.failed = backup_failed;
3955 mtk_nand_rrtry_func(mtd,devinfo,feature,FALSE);
3956 retryCount++;
3957 }
3958 else
3959 {
3960 feature = devinfo.feature_set.FeatureSet.readRetryDefault;
3961 // sandisk case 2/3/4
3962 if((devinfo.feature_set.FeatureSet.rtype == RTYPE_SANDISK) && (g_sandisk_retry_case < 3))
3963 {
3964 g_sandisk_retry_case++;
3965 printk("Sandisk read retry case#%d\n", g_sandisk_retry_case);
3966 tempBitMap = 0;
3967 mtd->ecc_stats.corrected = backup_corrected;
3968 mtd->ecc_stats.failed = backup_failed;
3969 mtk_nand_rrtry_func(mtd,devinfo,feature,FALSE);
3970 //if((g_sandisk_retry_case == 0) || (g_sandisk_retry_case == 2))
3971 //{
3972 // mtk_nand_set_command(0x26);
3973 //}
3974 retryCount = 0;
3975 }else
3976 {
3977 mtk_nand_rrtry_func(mtd,devinfo,feature,TRUE);
3978 readRetry = FALSE;
3979 g_sandisk_retry_case = 0;
3980 }
3981 }
3982 if((g_sandisk_retry_case == 1) || (g_sandisk_retry_case == 3))
3983 {
3984 mtk_nand_set_command(0x26);
3985 printk("Case1#3# Set cmd 26\n");
3986 }
3987 }
3988 else
3989 {
3990 if((retryCount != 0) && MLC_DEVICE)
3991 {
3992 u32 feature = devinfo.feature_set.FeatureSet.readRetryDefault;
3993 mtk_nand_rrtry_func(mtd,devinfo,feature,TRUE);
3994 }
3995 readRetry = FALSE;
3996 g_sandisk_retry_case = 0;
3997 }
3998 if(TRUE == readRetry)
3999 bRet = ERR_RTN_SUCCESS;
4000 }while(readRetry);
4001 if(retryCount != 0)
4002 {
4003 u32 feature = devinfo.feature_set.FeatureSet.readRetryDefault;
4004 if(bRet == ERR_RTN_SUCCESS)
4005 {
4006 MSG(INIT, "u4RowAddr:0x%x read retry pass, retrycnt:%d ENUM0:%x,ENUM1:%x,\n",u4RowAddr,retryCount,DRV_Reg32(ECC_DECENUM0_REG32),DRV_Reg32(ECC_DECENUM1_REG32));
4007 mtd->ecc_stats.corrected++;
4008 if((devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM) || (devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX))
4009 {
4010 g_hynix_retry_count--;
4011 }
4012 }
4013 mtk_nand_rrtry_func(mtd,devinfo,feature,TRUE);
4014 g_sandisk_retry_case = 0;
4015 }
4016 if (buf == local_buffer_16_align)
4017 memcpy(pPageBuf, buf, u4PageSize);
4018
4019 PFM_END_R(pfm_time_read, u4PageSize + 32);
4020 if(bRet != ERR_RTN_SUCCESS)
4021 {
4022 MSG(INIT,"ECC uncorrectable, fake buffer returned\n");
4023 memset(pPageBuf,0xff,u4PageSize);
4024 memset(pFDMBuf,0xff,u4SecNum*8);
4025 }
4026 return bRet;
4027}
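/*
 * Worked example (illustrative, assuming a hypothetical device with 1KiB
 * sectors and 32 spare bytes per sector): reading subpage 2 of a page uses
 * u4ColAddr = 2 * (1024 + 32) = 2112 and u4PageSize = 1024 * subpageno,
 * which is exactly how mtk_nand_read_subpage() below builds its arguments.
 */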
4028
4029/******************************************************************************
4030 * mtk_nand_exec_write_page
4031 *
4032 * DESCRIPTION:
4033 * Write a page of data !
4034 *
4035 * PARAMETERS:
4036 * struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize,
4037 * u8* pPageBuf, u8* pFDMBuf
4038 *
4039 * RETURNS:
4040 * None
4041 *
4042 * NOTES:
4043 * None
4044 *
4045 ******************************************************************************/
4046int mtk_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
4047{
4048 struct nand_chip *chip = mtd->priv;
4049 u32 u4SecNum = u4PageSize >> host->hw->nand_sec_shift;
4050 u8 *buf;
4051 u8 status;
4052#ifdef PWR_LOSS_SPOH
4053 u32 time;
4054 struct timeval pl_time_write;
4055 suseconds_t duration;
4056#endif
4057#if 0
4058 {
4059 val = devinfo.feature_set.FeatureSet.readRetryDefault;
4060 mtk_nand_SetFeature(mtd, devinfo.feature_set.FeatureSet.sfeatureCmd,\
4061 devinfo.feature_set.FeatureSet.readRetryAddress,\
4062 (u8 *)&val,4);
4063 mtk_nand_GetFeature(mtd, devinfo.feature_set.FeatureSet.gfeatureCmd,\
4064 devinfo.feature_set.FeatureSet.readRetryAddress,\
4065 (u8 *)&val,4);
4066 if((val&0xFF) != (devinfo.feature_set.FeatureSet.readRetryDefault&0xFF))
4067 {
4068 MSG(INIT, "mtk_nand_exec_write_page check read retry default value fail 0x%x\n",val);
4069 }
4070 }
4071#endif
4072 //MSG(INIT, "mtk_nand_exec_write_page, page: 0x%x\n", u4RowAddr);
4073#if CFG_2CS_NAND
4074 if (g_bTricky_CS)
4075 {
4076 u4RowAddr = mtk_nand_cs_on(chip, NFI_TRICKY_CS, u4RowAddr);
4077 }
4078#endif
4079
4080 if(use_randomizer && u4RowAddr >= RAND_START_ADDR)
4081 { mtk_nand_turn_on_randomizer(u4RowAddr, 1, 0);}
4082 else if(pre_randomizer && u4RowAddr < RAND_START_ADDR)
4083 { mtk_nand_turn_on_randomizer(u4RowAddr, 1, 0);}
4084
4085#ifdef _MTK_NAND_DUMMY_DRIVER_
4086 if (dummy_driver_debug)
4087 {
4088 unsigned long long time = sched_clock();
4089 if (!((time * 123 + 59) % 32768))
4090 {
4091 printk(KERN_INFO "[NAND_DUMMY_DRIVER] Simulate write error at page: 0x%x\n", u4RowAddr);
4092 return -EIO;
4093 }
4094 }
4095#endif
4096
4097#ifdef NAND_PFM
4098 struct timeval pfm_time_write;
4099#endif
4100 PFM_BEGIN(pfm_time_write);
4101 if (((u32) pPageBuf % 16) && local_buffer_16_align)
4102 {
4103 printk(KERN_INFO "Data buffer not 16 bytes aligned: %p\n", pPageBuf);
4104 memcpy(local_buffer_16_align, pPageBuf, mtd->writesize);
4105 buf = local_buffer_16_align;
4106 }
4107 else
4108 {
4109 if(virt_addr_valid (pPageBuf)==0)
4110 { // It should be allocated by vmalloc
4111 memcpy(local_buffer_16_align, pPageBuf, mtd->writesize);
4112 buf = local_buffer_16_align;
4113 }
4114 else
4115 {
4116 buf = pPageBuf;
4117 }
4118 }
4119
4120 if (mtk_nand_ready_for_write(chip, u4RowAddr, 0, true, buf))
4121 {
4122 mtk_nand_write_fdm_data(chip, pFDMBuf, u4SecNum);
4123 (void)mtk_nand_write_page_data(mtd, buf, u4PageSize);
4124 (void)mtk_nand_check_RW_count(u4PageSize);
4125 mtk_nand_stop_write();
4126 PL_NAND_BEGIN(pl_time_write);
4127 PL_TIME_RAND_PROG(chip, u4RowAddr, time);
4128 (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
4129 PL_NAND_RESET(time);
4130 {
4131 #if CFG_PERFLOG_DEBUG
4132 struct timeval stimer,etimer;
4133 do_gettimeofday(&stimer);
4134 #endif
4135 while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ;
4136 #if CFG_PERFLOG_DEBUG
4137 do_gettimeofday(&etimer);
4138 //printk("[Bean]Cal_timediff(&etimer,&stimer):0x%x\n", Cal_timediff(&etimer,&stimer));
4139 g_NandPerfLog.WriteBusyTotalTime+= Cal_timediff(&etimer,&stimer);
4140 g_NandPerfLog.WriteBusyCount++;
4141 #endif
4142 }
4143 }
4144 else
4145 {
4146 printk("[Bean]mtk_nand_ready_for_write fail!\n");
4147 }
4148 PL_NAND_END(pl_time_write, duration);
4149 PL_TIME_PROG(duration);
4150 PFM_END_W(pfm_time_write, u4PageSize + 32);
4151
4152 if(use_randomizer && u4RowAddr >= RAND_START_ADDR)
4153 { mtk_nand_turn_off_randomizer();}
4154 else if(pre_randomizer && u4RowAddr < RAND_START_ADDR)
4155 { mtk_nand_turn_off_randomizer();}
4156 status = chip->waitfunc(mtd, chip);
4157 //printk("[Bean]status:%d\n", status);
4158 if (status & NAND_STATUS_FAIL)
4159 return -EIO;
4160 else
4161 return 0;
4162}
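/*
 * Usage note (added for clarity, not from the original author): pPageBuf is
 * bounced through local_buffer_16_align when it is not 16-byte aligned or is
 * a vmalloc'ed address, so callers may pass any buffer. A return value of
 * -EIO means the program operation itself reported failure and the caller is
 * expected to remap the block, as mtk_nand_write_page() below does.
 */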
4163
4164/******************************************************************************
4165 *
4166 * Write a page to a logical address
4167 *
4168 *****************************************************************************/
4169static int mtk_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
4170 uint32_t offset, int data_len, const uint8_t *buf,
4171 int oob_required, int page, int cached, int raw)
4172{
4173// int block_size = 1 << (chip->phys_erase_shift);
4174 int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4175 u32 block;
4176 u32 page_in_block;
4177 u32 mapped_block;
4178#if CFG_PERFLOG_DEBUG
4179 struct timeval stimer,etimer;
4180 do_gettimeofday(&stimer);
4181#endif
4182 page_in_block = mtk_nand_page_transform(mtd,chip,page,&block,&mapped_block);
4183 //MSG(INIT,"[WRITE] %d, %d, %d %d\n",mapped_block, block, page_in_block, page_per_block);
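/*
 * Worked example (illustrative numbers): with 64 pages per block, logical
 * page 0x85 is page 5 of logical block 2; if the BMT has remapped block 2 to
 * block 0x40, the physical page actually programmed below is
 * 5 + 0x40 * 64 = 0x1005.
 */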
4184 // write bad index into oob
4185 if (mapped_block != block)
4186 {
4187 set_bad_index_to_oob(chip->oob_poi, block);
4188 } else
4189 {
4190 set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
4191 }
4192
4193 if (mtk_nand_exec_write_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *) buf, chip->oob_poi))
4194 {
4195 MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
4196 if (update_bmt((u64)((u64)page_in_block + (u64)mapped_block * page_per_block) << chip->page_shift, UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi))
4197 {
4198 MSG(INIT, "Update BMT success\n");
4199 return 0;
4200 } else
4201 {
4202 MSG(INIT, "Update BMT fail\n");
4203 return -EIO;
4204 }
4205 }
4206#if CFG_PERFLOG_DEBUG
4207 do_gettimeofday(&etimer);
4208 g_NandPerfLog.WritePageTotalTime+= Cal_timediff(&etimer,&stimer);
4209 g_NandPerfLog.WritePageCount++;
4210 dump_nand_rwcount();
4211#endif
4212 return 0;
4213}
4214
4215//-------------------------------------------------------------------------------
4216/*
4217static void mtk_nand_command_sp(
4218 struct mtd_info *mtd, unsigned int command, int column, int page_addr)
4219{
4220 g_u4ColAddr = column;
4221 g_u4RowAddr = page_addr;
4222
4223 switch(command)
4224 {
4225 case NAND_CMD_STATUS:
4226 break;
4227
4228 case NAND_CMD_READID:
4229 break;
4230
4231 case NAND_CMD_RESET:
4232 break;
4233
4234 case NAND_CMD_RNDOUT:
4235 case NAND_CMD_RNDOUTSTART:
4236 case NAND_CMD_RNDIN:
4237 case NAND_CMD_CACHEDPROG:
4238 case NAND_CMD_STATUS_MULTI:
4239 default:
4240 break;
4241 }
4242
4243}
4244*/
4245
4246/******************************************************************************
4247 * mtk_nand_command_bp
4248 *
4249 * DESCRIPTION:
4250 * Handle the commands from MTD !
4251 *
4252 * PARAMETERS:
4253 * struct mtd_info *mtd, unsigned int command, int column, int page_addr
4254 *
4255 * RETURNS:
4256 * None
4257 *
4258 * NOTES:
4259 * None
4260 *
4261 ******************************************************************************/
4262static void mtk_nand_command_bp(struct mtd_info *mtd, unsigned int command, int column, int page_addr)
4263{
4264 struct nand_chip *nand = mtd->priv;
4265#ifdef NAND_PFM
4266 struct timeval pfm_time_erase;
4267#endif
4268#if 0
4269// int block_size = 1 << (nand->phys_erase_shift);
4270// int page_per_block = 1 << (nand->phys_erase_shift - nand->page_shift);
4271// u32 block;
4272// u16 page_in_block;
4273// u32 mapped_block;
4274// bool rand= FALSE;
4275 page_addr = mtk_nand_page_transform(mtd,nand,&block,&mapped_block);
4276 page_addr = mapped_block*page_per_block + page_addr;
4277#endif
4278 switch (command)
4279 {
4280 case NAND_CMD_SEQIN:
4281 memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB));
4282 g_kCMD.pDataBuf = NULL;
4283 //}
4284 g_kCMD.u4RowAddr = page_addr;
4285 g_kCMD.u4ColAddr = column;
4286 break;
4287
4288 case NAND_CMD_PAGEPROG:
4289 if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[0]))
4290 {
4291 u8 *pDataBuf = g_kCMD.pDataBuf ? g_kCMD.pDataBuf : nand->buffers->databuf;
4292 mtk_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf, g_kCMD.au1OOB);
4293 g_kCMD.u4RowAddr = (u32) - 1;
4294 g_kCMD.u4OOBRowAddr = (u32) - 1;
4295 }
4296 break;
4297
4298 case NAND_CMD_READOOB:
4299 g_kCMD.u4RowAddr = page_addr;
4300 g_kCMD.u4ColAddr = column + mtd->writesize;
4301#ifdef NAND_PFM
4302 g_kCMD.pureReadOOB = 1;
4303 g_kCMD.pureReadOOBNum += 1;
4304#endif
4305 break;
4306
4307 case NAND_CMD_READ0:
4308 g_kCMD.u4RowAddr = page_addr;
4309 g_kCMD.u4ColAddr = column;
4310#ifdef NAND_PFM
4311 g_kCMD.pureReadOOB = 0;
4312#endif
4313 break;
4314
4315 case NAND_CMD_ERASE1:
4316 PFM_BEGIN(pfm_time_erase);
4317 (void)mtk_nand_reset();
4318 mtk_nand_set_mode(CNFG_OP_ERASE);
4319 (void)mtk_nand_set_command(NAND_CMD_ERASE1);
4320 (void)mtk_nand_set_address(0, page_addr, 0, devinfo.addr_cycle - 2);
4321 break;
4322
4323 case NAND_CMD_ERASE2:
4324 (void)mtk_nand_set_command(NAND_CMD_ERASE2);
4325 while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ;
4326 PFM_END_E(pfm_time_erase);
4327 break;
4328
4329 case NAND_CMD_STATUS:
4330 (void)mtk_nand_reset();
4331 if(mtk_nand_israndomizeron())
4332 {
4333 //g_brandstatus = TRUE;
4334 mtk_nand_turn_off_randomizer();
4335 }
4336 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
4337 mtk_nand_set_mode(CNFG_OP_SRD);
4338 mtk_nand_set_mode(CNFG_READ_EN);
4339 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
4340 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
4341 (void)mtk_nand_set_command(NAND_CMD_STATUS);
4342 NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_NOB_MASK);
4343 mb();
4344 DRV_WriteReg32(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT));
4345 g_bcmdstatus = true;
4346 break;
4347
4348 case NAND_CMD_RESET:
4349 (void)mtk_nand_reset();
4350 break;
4351
4352 case NAND_CMD_READID:
4353 /* Issue NAND chip reset command */
4354 //NFI_ISSUE_COMMAND (NAND_CMD_RESET, 0, 0, 0, 0);
4355
4356 //timeout = TIMEOUT_4;
4357
4358 //while (timeout)
4359 //timeout--;
4360
4361 mtk_nand_reset();
4362 /* Disable HW ECC */
4363 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
4364 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
4365
4366 /* Disable 16-bit I/O */
4367 //NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
4368
4369 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN | CNFG_BYTE_RW);
4370 (void)mtk_nand_reset();
4371 mb();
4372 mtk_nand_set_mode(CNFG_OP_SRD);
4373 (void)mtk_nand_set_command(NAND_CMD_READID);
4374 (void)mtk_nand_set_address(0, 0, 1, 0);
4375 DRV_WriteReg32(NFI_CON_REG16, CON_NFI_SRD);
4376 while (DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE) ;
4377 break;
4378
4379 default:
4380 BUG();
4381 break;
4382 }
4383}
4384
4385/******************************************************************************
4386 * mtk_nand_select_chip
4387 *
4388 * DESCRIPTION:
4389 * Select a chip !
4390 *
4391 * PARAMETERS:
4392 * struct mtd_info *mtd, int chip
4393 *
4394 * RETURNS:
4395 * None
4396 *
4397 * NOTES:
4398 * None
4399 *
4400 ******************************************************************************/
4401static void mtk_nand_select_chip(struct mtd_info *mtd, int chip)
4402{
4403 if (chip == -1 && false == g_bInitDone)
4404 {
4405 struct nand_chip *nand = mtd->priv;
4406
4407 struct mtk_nand_host *host = nand->priv;
4408 struct mtk_nand_host_hw *hw = host->hw;
4409 u32 spare_per_sector = mtd->oobsize/( mtd->writesize/hw->nand_sec_size);
4410 u32 ecc_bit = 4;
4411 u32 spare_bit = PAGEFMT_SPARE_16;
4412 switch(spare_per_sector)
4413 {
4414#ifndef MTK_COMBO_NAND_SUPPORT
4415 case 16:
4416 spare_bit = PAGEFMT_SPARE_16;
4417 ecc_bit = 4;
4418 spare_per_sector = 16;
4419 break;
4420 case 26:
4421 case 27:
4422 case 28:
4423 spare_bit = PAGEFMT_SPARE_26;
4424 ecc_bit = 10;
4425 spare_per_sector = 26;
4426 break;
4427 case 32:
4428 ecc_bit = 12;
4429 if(MLC_DEVICE == TRUE)
4430 spare_bit = PAGEFMT_SPARE_32_1KS;
4431 else
4432 spare_bit = PAGEFMT_SPARE_32;
4433 spare_per_sector = 32;
4434 break;
4435 case 40:
4436 ecc_bit = 18;
4437 spare_bit = PAGEFMT_SPARE_40;
4438 spare_per_sector = 40;
4439 break;
4440 case 44:
4441 ecc_bit = 20;
4442 spare_bit = PAGEFMT_SPARE_44;
4443 spare_per_sector = 44;
4444 break;
4445 case 48:
4446 case 49:
4447 ecc_bit = 22;
4448 spare_bit = PAGEFMT_SPARE_48;
4449 spare_per_sector = 48;
4450 break;
4451 case 50:
4452 case 51:
4453 ecc_bit = 24;
4454 spare_bit = PAGEFMT_SPARE_50;
4455 spare_per_sector = 50;
4456 break;
4457 case 52:
4458 case 54:
4459 case 56:
4460 ecc_bit = 24;
4461 if(MLC_DEVICE == TRUE)
4462 spare_bit = PAGEFMT_SPARE_52_1KS;
4463 else
4464 spare_bit = PAGEFMT_SPARE_52;
4465 spare_per_sector = 52;
4466 break;
4467#endif
4468 case 62:
4469 case 63:
4470 ecc_bit = 28;
4471 spare_bit = PAGEFMT_SPARE_62;
4472 spare_per_sector = 62;
4473 break;
4474 case 64:
4475 ecc_bit = 32;
4476 if(MLC_DEVICE == TRUE)
4477 spare_bit = PAGEFMT_SPARE_64_1KS;
4478 else
4479 spare_bit = PAGEFMT_SPARE_64;
4480 spare_per_sector = 64;
4481 break;
4482 case 72:
4483 ecc_bit = 36;
4484 if(MLC_DEVICE == TRUE)
4485 spare_bit = PAGEFMT_SPARE_72_1KS;
4486 spare_per_sector = 72;
4487 break;
4488 case 80:
4489 ecc_bit = 40;
4490 if(MLC_DEVICE == TRUE)
4491 spare_bit = PAGEFMT_SPARE_80_1KS;
4492 spare_per_sector = 80;
4493 break;
4494 case 88:
4495 ecc_bit = 44;
4496 if(MLC_DEVICE == TRUE)
4497 spare_bit = PAGEFMT_SPARE_88_1KS;
4498 spare_per_sector = 88;
4499 break;
4500 case 96:
4501 case 98:
4502 ecc_bit = 48;
4503 if(MLC_DEVICE == TRUE)
4504 spare_bit = PAGEFMT_SPARE_96_1KS;
4505 spare_per_sector = 96;
4506 break;
4507 case 100:
4508 case 102:
4509 case 104:
4510 ecc_bit = 52;
4511 if(MLC_DEVICE == TRUE)
4512 spare_bit = PAGEFMT_SPARE_100_1KS;
4513 spare_per_sector = 100;
4514 break;
4515 case 124:
4516 case 126:
4517 case 128:
4518 ecc_bit = 60;
4519 if(MLC_DEVICE == TRUE)
4520 spare_bit = PAGEFMT_SPARE_124_1KS;
4521 spare_per_sector = 124;
4522 break;
4523 default:
4524 MSG(INIT, "[NAND]: NFI does not support oobsize: %x\n", spare_per_sector);
4525 ASSERT(0);
4526 }
4527
4528 mtd->oobsize = spare_per_sector*(mtd->writesize/hw->nand_sec_size);
4529 printk("[NAND]select ecc bit:%d, sparesize :%d\n",ecc_bit,mtd->oobsize);
4530/* Setup PageFormat */
4531
4532 if (16384 == mtd->writesize)
4533 {
4534 NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_16K_1KS);
4535 nand->cmdfunc = mtk_nand_command_bp;
4536 } else if (8192 == mtd->writesize)
4537 {
4538 NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_8K_1KS);
4539 nand->cmdfunc = mtk_nand_command_bp;
4540 } else if (4096 == mtd->writesize)
4541 {
4542 if(MLC_DEVICE == FALSE)
4543 NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K);
4544 else
4545 NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K_1KS);
4546 nand->cmdfunc = mtk_nand_command_bp;
4547 } else if (2048 == mtd->writesize)
4548 {
4549 if(MLC_DEVICE == FALSE)
4550 NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K);
4551 else
4552 NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K_1KS);
4553 nand->cmdfunc = mtk_nand_command_bp;
4554 }
4555 ecc_threshold = ecc_bit*4/5;
4556 ECC_Config(hw,ecc_bit);
4557 g_bInitDone = true;
4558
4559 //xiaolei for kernel3.10
4560 nand->ecc.strength = ecc_bit;
4561 mtd->bitflip_threshold = nand->ecc.strength;
4562 }
4563 switch (chip)
4564 {
4565 case -1:
4566 break;
4567 case 0:
4568#if CFG_FPGA_PLATFORM // FPGA NAND is placed at CS1 not CS0
4569 DRV_WriteReg16(NFI_CSEL_REG16, 0);
4570 break;
4571#endif
4572 case 1:
4573 DRV_WriteReg16(NFI_CSEL_REG16, chip);
4574 break;
4575 }
4576}
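/*
 * Worked example (illustrative, not from the original author): a device with
 * writesize 2048, nand_sec_size 512 and oobsize 128 gives spare_per_sector =
 * 128 / (2048 / 512) = 32, so the switch above selects 12-bit ECC, the
 * oobsize is normalized to 32 * 4 = 128 bytes, and ecc_threshold becomes
 * 12 * 4 / 5 = 9.
 */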
4577
4578/******************************************************************************
4579 * mtk_nand_read_byte
4580 *
4581 * DESCRIPTION:
4582 * Read a byte of data !
4583 *
4584 * PARAMETERS:
4585 * struct mtd_info *mtd
4586 *
4587 * RETURNS:
4588 * None
4589 *
4590 * NOTES:
4591 * None
4592 *
4593 ******************************************************************************/
4594static uint8_t mtk_nand_read_byte(struct mtd_info *mtd)
4595{
4596#if 0
4597 //while(0 == FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)));
4598 /* Check the PIO bit is ready or not */
4599 u32 timeout = TIMEOUT_4;
4600 uint8_t retval = 0;
4601 WAIT_NFI_PIO_READY(timeout);
4602
4603 retval = DRV_Reg8(NFI_DATAR_REG32);
4604 MSG(INIT, "mtk_nand_read_byte (0x%x)\n", retval);
4605
4606 if (g_bcmdstatus)
4607 {
4608 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
4609 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
4610 g_bcmdstatus = false;
4611 }
4612
4613 return retval;
4614#endif
4615 uint8_t retval = 0;
4616
4617 if (!mtk_nand_pio_ready())
4618 {
4619 printk("pio ready timeout\n");
4620 retval = false;
4621 }
4622
4623 if (g_bcmdstatus)
4624 {
4625 retval = DRV_Reg8(NFI_DATAR_REG32);
4626 NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_NOB_MASK);
4627 mtk_nand_reset();
4628#if (__INTERNAL_USE_AHB_MODE__)
4629 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
4630#endif
4631 if (g_bHwEcc)
4632 {
4633 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
4634 } else
4635 {
4636 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
4637 }
4638 g_bcmdstatus = false;
4639 } else
4640 retval = DRV_Reg8(NFI_DATAR_REG32);
4641
4642 /*if(g_brandstatus)
4643 {
4644 g_brandstatus = FALSE;
4645 mtk_nand_turn_on_randomizer(g_kCMD.u4RowAddr, g_kCMD.u4ColAddr / devinfo.sectorsize, FALSE);
4646 }*/
4647
4648 return retval;
4649}
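/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * the status-read path above is normally driven through the nand_chip ops,
 * i.e. issue NAND_CMD_STATUS via cmdfunc and then fetch the status byte with
 * read_byte, which also restores the AHB/ECC configuration disturbed by the
 * status command.
 */
#if 0 /* example only */
static int example_read_status(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;

	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	return (chip->read_byte(mtd) & NAND_STATUS_FAIL) ? -EIO : 0;
}
#endif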
4650
4651/******************************************************************************
4652 * mtk_nand_read_buf
4653 *
4654 * DESCRIPTION:
4655 * Read NAND data !
4656 *
4657 * PARAMETERS:
4658 * struct mtd_info *mtd, uint8_t *buf, int len
4659 *
4660 * RETURNS:
4661 * None
4662 *
4663 * NOTES:
4664 * None
4665 *
4666 ******************************************************************************/
4667static void mtk_nand_read_buf(struct mtd_info *mtd, uint8_t * buf, int len)
4668{
4669 struct nand_chip *nand = (struct nand_chip *)mtd->priv;
4670 struct NAND_CMD *pkCMD = &g_kCMD;
4671 u32 u4ColAddr = pkCMD->u4ColAddr;
4672 u32 u4PageSize = mtd->writesize;
4673
4674 if (u4ColAddr < u4PageSize)
4675 {
4676 if ((u4ColAddr == 0) && (len >= u4PageSize))
4677 {
4678 mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, pkCMD->au1OOB);
4679 if (len > u4PageSize)
4680 {
4681 u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB));
4682 memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size);
4683 }
4684 } else
4685 {
4686 mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
4687 memcpy(buf, nand->buffers->databuf + u4ColAddr, len);
4688 }
4689 pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
4690 } else
4691 {
4692 u32 u4Offset = u4ColAddr - u4PageSize;
4693 u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB));
4694 if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr)
4695 {
4696 mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
4697 pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
4698 }
4699 memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size);
4700 }
4701 pkCMD->u4ColAddr += len;
4702}
4703
4704/******************************************************************************
4705 * mtk_nand_write_buf
4706 *
4707 * DESCRIPTION:
4708 * Write NAND data !
4709 *
4710 * PARAMETERS:
4711 * struct mtd_info *mtd, const uint8_t *buf, int len
4712 *
4713 * RETURNS:
4714 * None
4715 *
4716 * NOTES:
4717 * None
4718 *
4719 ******************************************************************************/
4720static void mtk_nand_write_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
4721{
4722 struct NAND_CMD *pkCMD = &g_kCMD;
4723 u32 u4ColAddr = pkCMD->u4ColAddr;
4724 u32 u4PageSize = mtd->writesize;
4725 int i4Size, i;
4726
4727 if (u4ColAddr >= u4PageSize)
4728 {
4729 u32 u4Offset = u4ColAddr - u4PageSize;
4730 u8 *pOOB = pkCMD->au1OOB + u4Offset;
4731 i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset));
4732
4733 for (i = 0; i < i4Size; i++)
4734 {
4735 pOOB[i] &= buf[i];
4736 }
4737 } else
4738 {
4739 pkCMD->pDataBuf = (u8 *) buf;
4740 }
4741
4742 pkCMD->u4ColAddr += len;
4743}
4744
4745/******************************************************************************
4746 * mtk_nand_write_page_hwecc
4747 *
4748 * DESCRIPTION:
4749 * Write NAND data with hardware ecc !
4750 *
4751 * PARAMETERS:
4752 * struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf
4753 *
4754 * RETURNS:
4755 * None
4756 *
4757 * NOTES:
4758 * None
4759 *
4760 ******************************************************************************/
4761static void mtk_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required)
4762{
4763 mtk_nand_write_buf(mtd, buf, mtd->writesize);
4764 mtk_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
4765}
4766
4767/******************************************************************************
4768 * mtk_nand_read_page_hwecc
4769 *
4770 * DESCRIPTION:
4771 * Read NAND data with hardware ecc !
4772 *
4773 * PARAMETERS:
4774 * struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf
4775 *
4776 * RETURNS:
4777 * None
4778 *
4779 * NOTES:
4780 * None
4781 *
4782 ******************************************************************************/
4783static int mtk_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,uint8_t *buf, int oob_required, int page)
4784{
4785#if 0
4786 mtk_nand_read_buf(mtd, buf, mtd->writesize);
4787 mtk_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
4788#else
4789 struct NAND_CMD *pkCMD = &g_kCMD;
4790 u32 u4ColAddr = pkCMD->u4ColAddr;
4791 u32 u4PageSize = mtd->writesize;
4792
4793 if (u4ColAddr == 0)
4794 {
4795 mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi);
4796 pkCMD->u4ColAddr += u4PageSize + mtd->oobsize;
4797 }
4798#endif
4799 return 0;
4800}
4801
4802/******************************************************************************
4803 *
4804 * Read a page to a logical address
4805 *
4806 *****************************************************************************/
4807static int mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 * buf, int page)
4808{
4809// int block_size = 1 << (chip->phys_erase_shift);
4810 int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4811// int page_per_block1 = page_per_block;
4812 u32 block;
4813 u32 page_in_block;
4814 u32 mapped_block;
4815 int bRet = ERR_RTN_SUCCESS;
4816#if CFG_PERFLOG_DEBUG
4817 struct timeval stimer,etimer;
4818 do_gettimeofday(&stimer);
4819#endif
4820 page_in_block = mtk_nand_page_transform(mtd,chip,page,&block,&mapped_block);
4821 //MSG(INIT,"[READ] %d, %d, %d %d\n",mapped_block, block, page_in_block, page_per_block);
4822
4823 bRet = mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, buf, chip->oob_poi);
4824 if (bRet == ERR_RTN_SUCCESS)
4825 {
4826#if CFG_PERFLOG_DEBUG
4827 do_gettimeofday(&etimer);
4828 g_NandPerfLog.ReadPageTotalTime+= Cal_timediff(&etimer,&stimer);
4829 g_NandPerfLog.ReadPageCount++;
4830 dump_nand_rwcount();
4831#endif
4832 return 0;
4833 }
4834
4835 /* else
4836 return -EIO; */
4837 return 0;
4838}
4839
4840static int mtk_nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, u8 * buf, int page, int subpage, int subpageno)
4841{
4842// int block_size = 1 << (chip->phys_erase_shift);
4843 int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4844// int page_per_block1 = page_per_block;
4845 u32 block;
4846 int coladdr;
4847 u32 page_in_block;
4848 u32 mapped_block;
4849// bool readRetry = FALSE;
4850// int retryCount = 0;
4851 int bRet = ERR_RTN_SUCCESS;
4852 int sec_num = 1<<(chip->page_shift-host->hw->nand_sec_shift);
4853 int spare_per_sector = mtd->oobsize/sec_num;
4854#if CFG_PERFLOG_DEBUG
4855 struct timeval stimer,etimer;
4856 do_gettimeofday(&stimer);
4857#endif
4858 page_in_block = mtk_nand_page_transform(mtd,chip,page,&block,&mapped_block);
4859 coladdr = subpage*(devinfo.sectorsize+spare_per_sector);
4860 //coladdr = subpage*(devinfo.sectorsize);
4861 //MSG(INIT,"[Read Subpage] %d, %d, %d %d\n",mapped_block, block, page_in_block, page_per_block);
4862
4863 bRet = mtk_nand_exec_read_sector(mtd, page_in_block + mapped_block * page_per_block, coladdr, devinfo.sectorsize*subpageno, buf, chip->oob_poi,subpageno);
4864 //memset(bean_buffer, 0xFF, LPAGE);
4865 //bRet = mtk_nand_exec_read_page(mtd, page, mtd->writesize, bean_buffer, chip->oob_poi);
4866 if (bRet == ERR_RTN_SUCCESS)
4867 {
4868#if CFG_PERFLOG_DEBUG
4869 do_gettimeofday(&etimer);
4870 g_NandPerfLog.ReadSubPageTotalTime+= Cal_timediff(&etimer,&stimer);
4871 g_NandPerfLog.ReadSubPageCount++;
4872 dump_nand_rwcount();
4873#endif
4874 return 0;
4875 }
4876 //memcpy(buf, bean_buffer+coladdr, mtd->writesize);
4877 /* else
4878 return -EIO; */
4879 return 0;
4880}
4881
4882
4883/******************************************************************************
4884 *
4885 * Erase a block at a logical address
4886 *
4887 *****************************************************************************/
4888int mtk_nand_erase_hw(struct mtd_info *mtd, int page)
4889{
4890#ifdef PWR_LOSS_SPOH
4891 struct timeval pl_time_write;
4892 suseconds_t duration;
4893 u32 time;
4894#endif
4895 int result;
4896 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
4897#ifdef _MTK_NAND_DUMMY_DRIVER_
4898 if (dummy_driver_debug)
4899 {
4900 unsigned long long time = sched_clock();
4901 if (!((time * 123 + 59) % 1024))
4902 {
4903 printk(KERN_INFO "[NAND_DUMMY_DRIVER] Simulate erase error at page: 0x%x\n", page);
4904 return NAND_STATUS_FAIL;
4905 }
4906 }
4907#endif
4908#if CFG_2CS_NAND
4909 if (g_bTricky_CS)
4910 {
4911 page = mtk_nand_cs_on(chip, NFI_TRICKY_CS, page);
4912 }
4913#endif
4914 PL_NAND_BEGIN(pl_time_write);
4915 PL_TIME_RAND_ERASE(chip, page, time);
4916 chip->erase_cmd(mtd, page);
4917 PL_NAND_RESET(time);
4918 result=chip->waitfunc(mtd, chip);
4919 PL_NAND_END(pl_time_write, duration);
4920 PL_TIME_ERASE(duration);
4921 return result;
4922}
4923
4924static int mtk_nand_erase(struct mtd_info *mtd, int page)
4925{
4926 int status;
4927 struct nand_chip *chip = mtd->priv;
4928// int block_size = 1 << (chip->phys_erase_shift);
4929 int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
4930 u32 block;
4931 u32 page_in_block;
4932 u32 mapped_block;
4933#if CFG_PERFLOG_DEBUG
4934 struct timeval stimer,etimer;
4935 do_gettimeofday(&stimer);
4936#endif
4937 page_in_block = mtk_nand_page_transform(mtd,chip,page,&block,&mapped_block);
4938 //MSG(INIT, "[ERASE] 0x%x 0x%x\n", mapped_block, page);
4939 status = mtk_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block);
4940
4941 if (status & NAND_STATUS_FAIL)
4942 {
4943 if (update_bmt((u64)((u64)page_in_block + (u64)mapped_block * page_per_block) << chip->page_shift, UPDATE_ERASE_FAIL, NULL, NULL))
4944 {
4945 MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block);
4946 return 0;
4947 } else
4948 {
4949 MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block);
4950 return NAND_STATUS_FAIL;
4951 }
4952 }
4953#if CFG_PERFLOG_DEBUG
4954 do_gettimeofday(&etimer);
4955 g_NandPerfLog.EraseBlockTotalTime+= Cal_timediff(&etimer,&stimer);
4956 g_NandPerfLog.EraseBlockCount++;
4957 dump_nand_rwcount();
4958#endif
4959 return 0;
4960}
4961
4962/******************************************************************************
4963 * mtk_nand_read_multi_page_cache
4964 *
4965 * DESCRIPTION:
4966 * read multi page data using cache read
4967 *
4968 * PARAMETERS:
4969 * struct mtd_info *mtd, struct nand_chip *chip, int page, struct mtd_oob_ops *ops
4970 *
4971 * RETURNS:
4972 * None
4973 *
4974 * NOTES:
4975 * Only available for NAND flash that supports cache read.
4976 * Reads main data only.
4977 *
4978 *****************************************************************************/
4979#if 0
4980static int mtk_nand_read_multi_page_cache(struct mtd_info *mtd, struct nand_chip *chip, int page, struct mtd_oob_ops *ops)
4981{
4982 int res = -EIO;
4983 int len = ops->len;
4984 struct mtd_ecc_stats stat = mtd->ecc_stats;
4985 uint8_t *buf = ops->datbuf;
4986
4987 if (!mtk_nand_ready_for_read(chip, page, 0, true, buf))
4988 return -EIO;
4989
4990 while (len > 0)
4991 {
4992 mtk_nand_set_mode(CNFG_OP_CUST);
4993 DRV_WriteReg32(NFI_CON_REG16, 8 << CON_NFI_SEC_SHIFT);
4994
4995 if (len > mtd->writesize) // remained more than one page
4996 {
4997 if (!mtk_nand_set_command(0x31)) // todo: add cache read command
4998 goto ret;
4999 } else
5000 {
5001 if (!mtk_nand_set_command(0x3f)) // last page remained
5002 goto ret;
5003 }
5004
5005 mtk_nand_status_ready(STA_NAND_BUSY);
5006
5007#ifdef __INTERNAL_USE_AHB_MODE__
5008 //if (!mtk_nand_dma_read_data(buf, mtd->writesize))
5009 if (!mtk_nand_read_page_data(mtd, buf, mtd->writesize))
5010 goto ret;
5011#else
5012 if (!mtk_nand_mcu_read_data(buf, mtd->writesize))
5013 goto ret;
5014#endif
5015
5016 // get ecc error info
5017 mtk_nand_check_bch_error(mtd, buf, 3, page);
5018 ECC_Decode_End();
5019
5020 page++;
5021 len -= mtd->writesize;
5022 buf += mtd->writesize;
5023 ops->retlen += mtd->writesize;
5024
5025 if (len > 0)
5026 {
5027 ECC_Decode_Start();
5028 mtk_nand_reset();
5029 }
5030
5031 }
5032
5033 res = 0;
5034
5035 ret:
5036 mtk_nand_stop_read();
5037
5038 if (res)
5039 return res;
5040
5041 if (mtd->ecc_stats.failed > stat.failed)
5042 {
5043 printk(KERN_INFO "ecc fail happened\n");
5044 return -EBADMSG;
5045 }
5046
5047 return mtd->ecc_stats.corrected - stat.corrected ? -EUCLEAN : 0;
5048}
5049#endif
5050
5051/******************************************************************************
5052 * mtk_nand_read_oob_raw
5053 *
5054 * DESCRIPTION:
5055 * Read oob data
5056 *
5057 * PARAMETERS:
5058 * struct mtd_info *mtd, const uint8_t *buf, int addr, int len
5059 *
5060 * RETURNS:
5061 * None
5062 *
5063 * NOTES:
5064 * This function reads raw oob data out of flash, so the data format
5065 * needs to be re-organised before use.
5066 * len should be a multiple of 8; call this after nand_get_device.
5067 * Note that this function reads data without ECC protection.
5068 *
5069 *****************************************************************************/
5070static int mtk_nand_read_oob_raw(struct mtd_info *mtd, uint8_t * buf, int page_addr, int len)
5071{
5072 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
5073 u32 col_addr = 0;
5074 u32 sector = 0;
5075 int res = 0;
5076 u32 colnob = 2, rawnob = devinfo.addr_cycle - 2;
5077 int randomread = 0;
5078 int read_len = 0;
5079 int sec_num = 1<<(chip->page_shift-host->hw->nand_sec_shift);
5080 int spare_per_sector = mtd->oobsize/sec_num;
5081 u32 sector_size = NAND_SECTOR_SIZE;
5082 if(devinfo.sectorsize == 1024)
5083 sector_size = 1024;
5084
5085 if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf)
5086 {
5087 printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
5088 return -EINVAL;
5089 }
5090 if (len > spare_per_sector)
5091 {
5092 randomread = 1;
5093 }
5094 if (!randomread || !(devinfo.advancedmode & RAMDOM_READ))
5095 {
5096 while (len > 0)
5097 {
5098 read_len = min(len, spare_per_sector);
5099 col_addr = sector_size + sector * (sector_size + spare_per_sector); // TODO: Fix this hard-code 16
5100 if (!mtk_nand_ready_for_read(chip, page_addr, col_addr, sec_num, false, NULL))
5101 {
5102 printk(KERN_WARNING "mtk_nand_ready_for_read return failed\n");
5103 res = -EIO;
5104 goto error;
5105 }
5106 if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) // TODO: and this 8
5107 {
5108 printk(KERN_WARNING "mtk_nand_mcu_read_data return failed\n");
5109 res = -EIO;
5110 goto error;
5111 }
5112 mtk_nand_stop_read();
5113 //dump_data(buf + 16 * sector,16);
5114 sector++;
5115 len -= read_len;
5116
5117 }
5118 } else //should be 64
5119 {
5120 col_addr = sector_size;
5121 if (chip->options & NAND_BUSWIDTH_16)
5122 {
5123 col_addr /= 2;
5124 }
5125
5126 if (!mtk_nand_reset())
5127 {
5128 goto error;
5129 }
5130
5131 mtk_nand_set_mode(0x6000);
5132 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
5133 DRV_WriteReg32(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
5134
5135 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
5136 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
5137
5138 mtk_nand_set_autoformat(false);
5139
5140 if (!mtk_nand_set_command(NAND_CMD_READ0))
5141 {
5142 goto error;
5143 }
5144 //1 FIXME: handle any kind of address cycle
5145 if (!mtk_nand_set_address(col_addr, page_addr, colnob, rawnob))
5146 {
5147 goto error;
5148 }
5149
5150 if (!mtk_nand_set_command(NAND_CMD_READSTART))
5151 {
5152 goto error;
5153 }
5154 if (!mtk_nand_status_ready(STA_NAND_BUSY))
5155 {
5156 goto error;
5157 }
5158
5159 read_len = min(len, spare_per_sector);
5160 if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) // TODO: and this 8
5161 {
5162 printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
5163 res = -EIO;
5164 goto error;
5165 }
5166 sector++;
5167 len -= read_len;
5168 mtk_nand_stop_read();
5169 while (len > 0)
5170 {
5171 read_len = min(len, spare_per_sector);
5172 if (!mtk_nand_set_command(0x05))
5173 {
5174 goto error;
5175 }
5176
5177 col_addr = sector_size + sector * (sector_size + 16); //:TODO_JP careful 16
5178 if (chip->options & NAND_BUSWIDTH_16)
5179 {
5180 col_addr /= 2;
5181 }
5182 DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);
5183 DRV_WriteReg16(NFI_ADDRNOB_REG16, 2);
5184 DRV_WriteReg32(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
5185
5186 if (!mtk_nand_status_ready(STA_ADDR_STATE))
5187 {
5188 goto error;
5189 }
5190
5191 if (!mtk_nand_set_command(0xE0))
5192 {
5193 goto error;
5194 }
5195 if (!mtk_nand_status_ready(STA_NAND_BUSY))
5196 {
5197 goto error;
5198 }
5199 if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) // TODO: and this 8
5200 {
5201 printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
5202 res = -EIO;
5203 goto error;
5204 }
5205 mtk_nand_stop_read();
5206 sector++;
5207 len -= read_len;
5208 }
5209 //dump_data(&testbuf[16],16);
5210 //printk(KERN_ERR "\n");
5211 }
5212 error:
5213 NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_BRD);
5214 return res;
5215}
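/*
 * Illustrative usage sketch (hypothetical caller, not original code): read
 * the complete raw spare area of one page. mtd->oobsize is already a
 * multiple of OOB_AVAI_PER_SECTOR, satisfying the length constraint noted
 * above.
 */
#if 0 /* example only */
static int example_dump_raw_oob(struct mtd_info *mtd, int page)
{
	static u8 raw[NAND_MAX_OOBSIZE];
	int ret;

	nand_get_device(mtd, FL_READING);
	ret = mtk_nand_read_oob_raw(mtd, raw, page, mtd->oobsize);
	nand_release_device(mtd);
	return ret;
}
#endif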
5216
5217static int mtk_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t * buf, int page_addr, int len)
5218{
5219 struct nand_chip *chip = mtd->priv;
5220 // int i;
5221 u32 col_addr = 0;
5222 u32 sector = 0;
5223 // int res = 0;
5224 // u32 colnob=2, rawnob=devinfo.addr_cycle-2;
5225 // int randomread =0;
5226 int write_len = 0;
5227 int status;
5228 int sec_num = 1<<(chip->page_shift-host->hw->nand_sec_shift);
5229 int spare_per_sector = mtd->oobsize/sec_num;
5230 u32 sector_size = NAND_SECTOR_SIZE;
5231 if(devinfo.sectorsize == 1024)
5232 sector_size = 1024;
5233
5234 if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf)
5235 {
5236 printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
5237 return -EINVAL;
5238 }
5239
5240 while (len > 0)
5241 {
5242 write_len = min(len, spare_per_sector);
5243 col_addr = sector * (sector_size + spare_per_sector) + sector_size;
5244 if (!mtk_nand_ready_for_write(chip, page_addr, col_addr, false, NULL))
5245 {
5246 return -EIO;
5247 }
5248
5249 if (!mtk_nand_mcu_write_data(mtd, buf + sector * spare_per_sector, write_len))
5250 {
5251 return -EIO;
5252 }
5253
5254 (void)mtk_nand_check_RW_count(write_len);
5255 NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_BWR);
5256 (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
5257
5258 while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ;
5259
5260 status = chip->waitfunc(mtd, chip);
5261 if (status & NAND_STATUS_FAIL)
5262 {
5263 printk(KERN_INFO "status: %d\n", status);
5264 return -EIO;
5265 }
5266
5267 len -= write_len;
5268 sector++;
5269 }
5270
5271 return 0;
5272}
5273
5274static int mtk_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
5275{
5276 // u8 *buf = chip->oob_poi;
5277 int i, iter;
5278
5279 int sec_num = 1<<(chip->page_shift-host->hw->nand_sec_shift);
5280 int spare_per_sector = mtd->oobsize/sec_num;
5281
5282 memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
5283
5284 // copy ecc data
5285 for (i = 0; i < chip->ecc.layout->eccbytes; i++)
5286 {
5287 iter = (i / OOB_AVAI_PER_SECTOR) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % OOB_AVAI_PER_SECTOR;
5288 local_oob_buf[iter] = chip->oob_poi[chip->ecc.layout->eccpos[i]];
5289 // chip->oob_poi[chip->ecc.layout->eccpos[i]] = local_oob_buf[iter];
5290 }
5291
5292 // copy FDM data
5293 for (i = 0; i < sec_num; i++)
5294 {
5295 memcpy(&local_oob_buf[i * spare_per_sector], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR);
5296 }
5297
5298 return mtk_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize);
5299}
5300
5301static int mtk_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
5302{
5303// int block_size = 1 << (chip->phys_erase_shift);
5304 int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
5305// int page_per_block1 = page_per_block;
5306 u32 block;
5307 u16 page_in_block;
5308 u32 mapped_block;
5309
5310 //block = page / page_per_block1;
5311 //mapped_block = get_mapping_block_index(block);
5312 page_in_block = mtk_nand_page_transform(mtd,chip,page,&block,&mapped_block);
5313
5314 if (mapped_block != block)
5315 {
5316 set_bad_index_to_oob(chip->oob_poi, block);
5317 } else
5318 {
5319 set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
5320 }
5321
5322 if (mtk_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */ ))
5323 {
5324 MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
5325 if (update_bmt((u64)((u64)page_in_block + (u64)mapped_block * page_per_block) << chip->page_shift, UPDATE_WRITE_FAIL, NULL, chip->oob_poi))
5326 {
5327 MSG(INIT, "Update BMT success\n");
5328 return 0;
5329 } else
5330 {
5331 MSG(INIT, "Update BMT fail\n");
5332 return -EIO;
5333 }
5334 }
5335
5336 return 0;
5337}
5338
5339int mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset)
5340{
5341 struct nand_chip *chip = mtd->priv;
5342 int block = (int)(offset >> chip->phys_erase_shift);
5343 int page = block * (1 << (chip->phys_erase_shift - chip->page_shift));
5344 int ret;
5345
5346 u8 buf[8];
5347 memset(buf, 0xFF, 8);
5348 buf[0] = 0;
5349
5350 ret = mtk_nand_write_oob_raw(mtd, buf, page, 8);
5351 return ret;
5352}
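/*
 * Worked example (illustrative numbers): with 64 pages per block and a page
 * shift of 11 (2KiB pages), marking offset 0x100000 bad resolves to block
 * 0x8, page 0x200, and writes 0x00 followed by seven 0xFF bytes into the
 * first 8 raw spare bytes of that page.
 */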
5353
5354static int mtk_nand_block_markbad(struct mtd_info *mtd, loff_t offset)
5355{
5356 struct nand_chip *chip = mtd->priv;
5357 u32 block = (u32)(offset >> chip->phys_erase_shift);
5358 int page = block * (1 << (chip->phys_erase_shift - chip->page_shift));
5359 u32 mapped_block;
5360 int ret;
5361
5362 nand_get_device(mtd, FL_WRITING);
5363
5364 //mapped_block = get_mapping_block_index(block);
5365 page = mtk_nand_page_transform(mtd,chip,page,&block,&mapped_block);
5366 ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
5367
5368 nand_release_device(mtd);
5369
5370 return ret;
5371}
5372
5373int mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
5374{
5375 int i;
5376 u8 iter = 0;
5377
5378 int sec_num = 1<<(chip->page_shift-host->hw->nand_sec_shift);
5379 int spare_per_sector = mtd->oobsize/sec_num;
5380#ifdef TESTTIME
5381 unsigned long long time1, time2;
5382
5383 time1 = sched_clock();
5384#endif
5385
5386 if (mtk_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize))
5387 {
5388 // printk(KERN_ERR "[%s]mtk_nand_read_oob_raw return failed\n", __FUNCTION__);
5389 return -EIO;
5390 }
5391#ifdef TESTTIME
5392 time2 = sched_clock() - time1;
5393 if (!readoobflag)
5394 {
5395 readoobflag = 1;
5396 printk(KERN_ERR "[%s] time is %llu", __FUNCTION__, time2);
5397 }
5398#endif
5399
5400 // adjust from the ECC physical layout to the memory layout
5401 /*********************************************************/
5402 /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */
5403 /* 8B | 8B | 8B | 8B | 8B | 8B | 8B | 8B */
5404 /*********************************************************/
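/*
 * Worked example (illustrative): with OOB_AVAI_PER_SECTOR = 8 and
 * spare_per_sector = 16 as drawn above, ECC byte i = 10 lands at
 * iter = (10 / 8) * 16 + 8 + 10 % 8 = 26, i.e. the third ECC byte of
 * sector 1 in the raw spare layout read back from the flash.
 */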
5405
5406 memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
5407
5408 // copy ecc data
5409 for (i = 0; i < chip->ecc.layout->eccbytes; i++)
5410 {
5411 iter = (i / OOB_AVAI_PER_SECTOR) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % OOB_AVAI_PER_SECTOR;
5412 chip->oob_poi[chip->ecc.layout->eccpos[i]] = local_oob_buf[iter];
5413 }
5414
5415 // copy FDM data
5416 for (i = 0; i < sec_num; i++)
5417 {
5418 memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR], &local_oob_buf[i * spare_per_sector], OOB_AVAI_PER_SECTOR);
5419 }
5420
5421 return 0;
5422}
5423
5424static int mtk_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
5425{
5426// int block_size = 1 << (chip->phys_erase_shift);
5427// int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
5428// int block;
5429// u16 page_in_block;
5430// int mapped_block;
5431 //u8* buf = (u8*)kzalloc(mtd->writesize, GFP_KERNEL);
5432
5433 //page = mtk_nand_page_transform(mtd,chip,page,&block,&mapped_block);
5434#if 0
5435 if(block_size != mtd->erasesize)
5436 {
5437 page_per_block1 = page_per_block>>1;
5438 }
5439 block = page / page_per_block1;
5440 mapped_block = get_mapping_block_index(block);
5441 if(block_size != mtd->erasesize)
5442 page_in_block = devinfo.feature_set.PairPage[page % page_per_block1];
5443 else
5444 page_in_block = page % page_per_block1;
5445
5446 mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block);
5447#else
5448 mtk_nand_read_page(mtd,chip,temp_buffer_16_align,page);
5449 //kfree(buf);
5450#endif
5451
5452 return 0; // the return value is sndcmd
5453}
5454
5455int mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs)
5456{
5457 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
5458 int page_addr = (int)(ofs >> chip->page_shift);
5459 u32 block, mapped_block;
5460 int ret;
5461 unsigned int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
5462
5463 //unsigned char oob_buf[128];
5464 //char* buf = (char*) kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
5465
5466 //page_addr = mtk_nand_page_transform(mtd, chip, page_addr, &block, &mapped_block);
5467
5468 page_addr &= ~(page_per_block - 1);
5469
5470 //ret = mtk_nand_read_page(mtd,chip,buf,(ofs >> chip->page_shift));
5471 memset(temp_buffer_16_align,0xFF,LPAGE);
5472 ret = mtk_nand_read_subpage(mtd,chip,temp_buffer_16_align,(ofs >> chip->page_shift),0, 1);
5473 page_addr = mtk_nand_page_transform(mtd, chip, page_addr, &block, &mapped_block);
5474 //ret = mtk_nand_exec_read_page(mtd, page_addr+mapped_block*page_per_block, mtd->writesize, buf, oob_buf);
5475 if (0 != ret)
5476 {
5477 printk(KERN_WARNING "mtk_nand_read_oob_raw return error %d\n",ret);
5478// kfree(buf);
5479 return 1;
5480 }
5481
5482 if (chip->oob_poi[0] != 0xff)
5483 {
5484 printk(KERN_WARNING "Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", block*page_per_block, chip->oob_poi[0]);
5485 //kfree(buf);
5486 // dump_nfi();
5487 return 1;
5488 }
5489 //kfree(buf);
5490 return 0; // everything is OK, good block
5491}
5492
5493static int mtk_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
5494{
5495 int chipnr = 0;
5496
5497 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
5498 int block = (int)(ofs >> chip->phys_erase_shift);
5499 int mapped_block;
5500 int page = (int)(ofs >> chip->page_shift);
5501 int page_in_block;
5502 int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
5503
5504 int ret;
5505
5506 if (getchip)
5507 {
5508 chipnr = (int)(ofs >> chip->chip_shift);
5509 nand_get_device(mtd, FL_READING);
5510 /* Select the NAND device */
5511 chip->select_chip(mtd, chipnr);
5512 }
5513 //page = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
5514// mapped_block = get_mapping_block_index(block);
5515
5516 ret = mtk_nand_block_bad_hw(mtd, ofs);
5517 page_in_block = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
5518
5519 if (ret)
5520 {
5521 MSG(INIT, "Unmapped bad block: 0x%x %d\n", mapped_block,ret);
5522 if (update_bmt((u64)((u64)page_in_block + (u64)mapped_block * page_per_block)<<chip->page_shift, UPDATE_UNMAPPED_BLOCK, NULL, NULL))
5523 {
5524 MSG(INIT, "Update BMT success\n");
5525 ret = 0;
5526 } else
5527 {
5528 MSG(INIT, "Update BMT fail\n");
5529 ret = 1;
5530 }
5531 }
5532
5533 if (getchip)
5534 {
5535 nand_release_device(mtd);
5536 }
5537
5538 return ret;
5539}
5540/******************************************************************************
5541 * mtk_nand_init_size
5542 *
5543 * DESCRIPTION:
5544 * initialize the pagesize, oobsize, blocksize
5545 *
5546 * PARAMETERS:
5547 * struct mtd_info *mtd, struct nand_chip *this, u8 *id_data
5548 *
5549 * RETURNS:
5550 * Buswidth
5551 *
5552 * NOTES:
5553 * None
5554 *
5555 ******************************************************************************/
5556
5557static int mtk_nand_init_size(struct mtd_info *mtd, struct nand_chip *this, u8 *id_data)
5558{
5559 /* Get page size */
5560 mtd->writesize = devinfo.pagesize ;
5561
5562 /* Get oobsize */
5563 mtd->oobsize = devinfo.sparesize;
5564
5565 /* Get blocksize. */
5566 mtd->erasesize = devinfo.blocksize*1024;
5567 /* Get buswidth information */
5568 if(devinfo.iowidth==16)
5569 {
5570 return NAND_BUSWIDTH_16;
5571 }
5572 else
5573 {
5574 return 0;
5575 }
5576
5577}
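/*
 * Worked example (illustrative values): a devinfo entry with pagesize 4096,
 * sparesize 256, blocksize 512 (KiB) and iowidth 8 yields mtd->writesize =
 * 4096, mtd->oobsize = 256, mtd->erasesize = 512 * 1024 = 524288 bytes, and
 * a return value of 0 (8-bit bus).
 */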
5578
5579/******************************************************************************
5580 * mtk_nand_verify_buf
5581 *
5582 * DESCRIPTION:
5583 * Verify whether the written NAND data is correct !
5584 *
5585 * PARAMETERS:
5586 * struct mtd_info *mtd, const uint8_t *buf, int len
5587 *
5588 * RETURNS:
5589 * None
5590 *
5591 * NOTES:
5592 * None
5593 *
5594 ******************************************************************************/
5595#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
5596
5597char gacBuf[LPAGE + LSPARE];
5598
5599static int mtk_nand_verify_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
5600{
5601#if 1
5602 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
5603 struct NAND_CMD *pkCMD = &g_kCMD;
5604 u32 u4PageSize = mtd->writesize;
5605 u32 *pSrc, *pDst;
5606 int i;
5607
5608 mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize);
5609
5610 pSrc = (u32 *) buf;
5611 pDst = (u32 *) gacBuf;
5612 len = len / sizeof(u32);
5613 for (i = 0; i < len; ++i)
5614 {
5615 if (*pSrc != *pDst)
5616 {
5617 MSG(VERIFY, "mtk_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr);
5618 return -1;
5619 }
5620 pSrc++;
5621 pDst++;
5622 }
5623
5624 pSrc = (u32 *) chip->oob_poi;
5625 pDst = (u32 *) (gacBuf + u4PageSize);
5626
5627 if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) || (pSrc[2] != pDst[2]) || (pSrc[3] != pDst[3]) || (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5]))
5628 // TODO: Ask Designer Why?
5629 //(pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7]))
5630 {
5631 MSG(VERIFY, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr);
5632 MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc[0], pSrc[1], pSrc[2], pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]);
5633 MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst[0], pDst[1], pDst[2], pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]);
5634 return -1;
5635 }
5636 /*
5637 for (i = 0; i < len; ++i) {
5638 if (*pSrc != *pDst) {
5639 printk(KERN_ERR"mtk_nand_verify_buf oob fail at page %d\n", g_kCMD.u4RowAddr);
5640 return -1;
5641 }
5642 pSrc++;
5643 pDst++;
5644 }
5645 */
5646 //printk(KERN_INFO"mtk_nand_verify_buf OK at page %d\n", g_kCMD.u4RowAddr);
5647
5648 return 0;
5649#else
5650 return 0;
5651#endif
5652}
5653#endif
5654
5655/******************************************************************************
5656 * mtk_nand_init_hw
5657 *
5658 * DESCRIPTION:
5659 * Initialize the NAND device hardware components !
5660 *
5661 * PARAMETERS:
5662 * struct mtk_nand_host *host (Initial setting data)
5663 *
5664 * RETURNS:
5665 * None
5666 *
5667 * NOTES:
5668 * None
5669 *
5670 ******************************************************************************/
5671static void mtk_nand_init_hw(struct mtk_nand_host *host)
5672{
5673 struct mtk_nand_host_hw *hw = host->hw;
5674
5675
5676 g_bInitDone = false;
5677 g_kCMD.u4OOBRowAddr = (u32) - 1;
5678
5679 /* Set default NFI access timing control */
5680 DRV_WriteReg32(NFI_ACCCON_REG32, hw->nfi_access_timing);
5681 DRV_WriteReg16(NFI_CNFG_REG16, 0);
5682 DRV_WriteReg16(NFI_PAGEFMT_REG16, 4);
5683 DRV_WriteReg32(NFI_ENMPTY_THRESH_REG32, 40);
5684
5685 /* Reset the state machine and flush the data FIFO */
5686 (void)mtk_nand_reset();
5687
5688 /* Set the ECC engine */
5689 if (hw->nand_ecc_mode == NAND_ECC_HW)
5690 {
5691 MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME);
5692 if (g_bHwEcc)
5693 {
5694 NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
5695 }
5696 ECC_Config(host->hw,4);
5697 mtk_nand_configure_fdm(8);
5698 }
5699
5700 /* Initialize interrupts. Clear any pending interrupt (read-to-clear). */
5701 DRV_Reg16(NFI_INTR_REG16);
5702
5703 /* An interrupt is raised when an AHB data read or program transfer is done. */
5704 DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
5705
5706 // Automatically disable (gate) the ECC clock while the NFI is in the busy state
5707 DRV_WriteReg16(NFI_DEBUG_CON1_REG16, (NFI_BYPASS|WBUF_EN|HWDCM_SWCON_ON));
5708
5709 #ifdef CONFIG_PM
5710 host->saved_para.suspend_flag = 0;
5711 #endif
5712 // Reset
5713}
5714
5715//-------------------------------------------------------------------------------
5716static int mtk_nand_dev_ready(struct mtd_info *mtd)
5717{
5718 return !(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
5719}
5720
5721/******************************************************************************
5722 * mtk_nand_proc_read
5723 *
5724 * DESCRIPTION:
5725 * Read the proc file to get the interrupt scheme setting !
5726 *
5727 * PARAMETERS:
5728 * char *page, char **start, off_t off, int count, int *eof, void *data
5729 *
5730 * RETURNS:
5731 * None
5732 *
5733 * NOTES:
5734 * None
5735 *
5736 ******************************************************************************/
5737int mtk_nand_proc_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
5738{
5739 char *p = buffer;
5740 int len = 0;
5741 int i;
5742 p += sprintf(p, "ID:");
5743 for(i=0;i<devinfo.id_length;i++){
5744 p += sprintf(p, " 0x%x", devinfo.id[i]);
5745 }
5746 p += sprintf(p, "\n");
5747 p += sprintf(p, "total size: %dMiB; part number: %s\n", devinfo.totalsize,devinfo.devciename);
5748 p += sprintf(p, "Current working in %s mode\n", g_i4Interrupt ? "interrupt" : "polling");
5749 p += sprintf(p, "NFI_ACCON(0x%x)=0x%x\n",(NFI_BASE+0x000C),DRV_Reg32(NFI_ACCCON_REG32));
5750 p += sprintf(p, "NFI_NAND_TYPE_CNFG_REG32= 0x%x\n",DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32));
5751#if CFG_FPGA_PLATFORM
5752 p += sprintf(p, "[FPGA Dummy]DRV_CFG_NFIA(0x0)=0x0\n");
5753 p += sprintf(p, "[FPGA Dummy]DRV_CFG_NFIB(0x0)=0x0\n");
5754#else
5755 p += sprintf(p, "DRV_CFG_NFIA(IO PAD:0x%x)=0x%x\n",(GPIO_BASE+0xC20),*((volatile u32 *)(GPIO_BASE+0xC20)));
5756 p += sprintf(p, "DRV_CFG_NFIB(CTRL PAD:0x%x)=0x%x\n",(GPIO_BASE+0xB50),*((volatile u32 *)(GPIO_BASE+0xB50)));
5757#endif
5758#if CFG_PERFLOG_DEBUG
5759 p += sprintf(p, "Read Page Count:%d, Read Page totalTime:%lu, Avg. RPage:%lu\r\n",
5760 g_NandPerfLog.ReadPageCount,g_NandPerfLog.ReadPageTotalTime,
5761 g_NandPerfLog.ReadPageCount ? (g_NandPerfLog.ReadPageTotalTime/g_NandPerfLog.ReadPageCount): 0);
5762
5763 p += sprintf(p, "Read subPage Count:%d, Read subPage totalTime:%lu, Avg. RPage:%lu\r\n",
5764 g_NandPerfLog.ReadSubPageCount,g_NandPerfLog.ReadSubPageTotalTime,
5765 g_NandPerfLog.ReadSubPageCount? (g_NandPerfLog.ReadSubPageTotalTime/g_NandPerfLog.ReadSubPageCount): 0);
5766
5767 p += sprintf(p, "Read Busy Count:%d, Read Busy totalTime:%lu, Avg. R Busy:%lu\r\n",
5768 g_NandPerfLog.ReadBusyCount,g_NandPerfLog.ReadBusyTotalTime,
5769 g_NandPerfLog.ReadBusyCount? (g_NandPerfLog.ReadBusyTotalTime/g_NandPerfLog.ReadBusyCount): 0);
5770
5771 p += sprintf(p, "Read DMA Count:%d, Read DMA totalTime:%lu, Avg. R DMA:%lu\r\n",
5772 g_NandPerfLog.ReadDMACount,g_NandPerfLog.ReadDMATotalTime,
5773 g_NandPerfLog.ReadDMACount? (g_NandPerfLog.ReadDMATotalTime/g_NandPerfLog.ReadDMACount): 0);
5774
5775 p += sprintf(p, "Write Page Count:%d, Write Page totalTime:%lu, Avg. WPage:%lu\r\n",
5776 g_NandPerfLog.WritePageCount,g_NandPerfLog.WritePageTotalTime,
5777 g_NandPerfLog.WritePageCount? (g_NandPerfLog.WritePageTotalTime/g_NandPerfLog.WritePageCount): 0);
5778
5779 p += sprintf(p, "Write Busy Count:%d, Write Busy totalTime:%lu, Avg. W Busy:%lu\r\n",
5780 g_NandPerfLog.WriteBusyCount,g_NandPerfLog.WriteBusyTotalTime,
5781 g_NandPerfLog.WriteBusyCount? (g_NandPerfLog.WriteBusyTotalTime/g_NandPerfLog.WriteBusyCount): 0);
5782
5783 p += sprintf(p, "Write DMA Count:%d, Write DMA totalTime:%lu, Avg. W DMA:%lu\r\n",
5784 g_NandPerfLog.WriteDMACount,g_NandPerfLog.WriteDMATotalTime,
5785 g_NandPerfLog.WriteDMACount? (g_NandPerfLog.WriteDMATotalTime/g_NandPerfLog.WriteDMACount): 0);
5786
5787 p += sprintf(p, "EraseBlock Count:%d, EraseBlock totalTime:%lu, Avg. Erase:%lu\r\n",
5788 g_NandPerfLog.EraseBlockCount,g_NandPerfLog.EraseBlockTotalTime,
5789 g_NandPerfLog.EraseBlockCount? (g_NandPerfLog.EraseBlockTotalTime/g_NandPerfLog.EraseBlockCount): 0);
5790
5791#endif
5792 len = p - buffer;
5793
5794 return len < count ? len : count;
5795}
5796
5797/******************************************************************************
5798 * mtk_nand_proc_write
5799 *
5800 * DESCRIPTION:
5801 * Write the proc file to configure pad driving, the interrupt scheme, ACCCON timing and the performance logs !
5802 *
5803 * PARAMETERS:
5804 * struct file* file, const char* buffer, unsigned long count, void *data
5805 *
5806 * RETURNS:
5807 * None
5808 *
5809 * NOTES:
5810 * None
5811 *
5812 ******************************************************************************/
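/*
 * Illustrative usage (examples only, not part of the original source): the
 * handler parses one command letter followed by a hex value ("%c%x") written
 * to the proc entry created in mtk_nand_init(). For instance, writing "I1"
 * switches to interrupt mode and "I0" back to polling; "A<0-7>" / "B<0-7>"
 * set the IO / control pad driving step; "T<hex>" programs the NFI_ACCCON
 * timing; "P" resets the NAND_PFM counters, "R" resets the NFI performance
 * log, and "D" enables the dummy driver when it is compiled in.
 */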
5813int mtk_nand_proc_write(struct file *file, const char *buffer, unsigned long count, void *data)
5814{
5815 struct mtd_info *mtd = &host->mtd;
5816 char buf[16];
5817 char cmd;
5818 int value;
5819 int len = count;//, n;
5820
5821 if (len >= sizeof(buf))
5822 {
5823 len = sizeof(buf) - 1;
5824 }
5825
5826 if (copy_from_user(buf, buffer, len))
5827 {
5828 return -EFAULT;
5829 }
5830
 5831 buf[len] = '\0'; sscanf(buf, "%c%x", &cmd, &value);
5832
5833 switch(cmd)
5834 {
5835 case 'A': // NFIA driving setting
5836 #if CFG_FPGA_PLATFORM
5837 printk(KERN_INFO "[FPGA Dummy]NFIA driving setting\n");
5838 #else
5839 if ((value >= 0x0) && (value <= 0x7)) // driving step
5840 {
5841 printk(KERN_INFO "[NAND]IO PAD driving setting value(0x%x)\n\n", value);
5842 *((volatile u32 *)(GPIO_BASE+0xC20)) = value; //pad 7 6 4 3 0 1 5 8 2
5843 }
5844 else
5845 printk(KERN_ERR "[NAND]IO PAD driving setting value(0x%x) error\n", value);
5846 #endif
5847 break;
5848 case 'B': // NFIB driving setting
5849 #if CFG_FPGA_PLATFORM
5850 printk(KERN_INFO "[FPGA Dummy]NFIB driving setting\n");
5851 #else
5852 if ((value >= 0x0) && (value <= 0x7)) // driving step
5853 {
5854 printk(KERN_INFO "[NAND]Ctrl PAD driving setting value(0x%x)\n\n", value);
5855 *((volatile u32 *)(GPIO_BASE+0xB50)) = value; //CLE CE1 CE0 RE RB
5856 *((volatile u32 *)(GPIO_BASE+0xC10)) = value; //ALE
5857 *((volatile u32 *)(GPIO_BASE+0xC00)) = value; //WE
5858 }
5859 else
5860 printk(KERN_ERR "[NAND]Ctrl PAD driving setting value(0x%x) error\n", value);
5861 #endif
5862 break;
5863 case 'D':
5864 #ifdef _MTK_NAND_DUMMY_DRIVER_
5865 printk(KERN_INFO "Enable dummy driver\n");
5866 dummy_driver_debug = 1;
5867 #endif
5868 break;
5869 case 'I': // Interrupt control
 5870 if ((value > 0 && !g_i4Interrupt) || (value == 0 && g_i4Interrupt))
5871 {
5872 nand_get_device(mtd, FL_READING);
5873
5874 g_i4Interrupt = value;
5875
5876 if (g_i4Interrupt)
5877 {
5878 DRV_Reg16(NFI_INTR_REG16);
5879 enable_irq(MT_NFI_IRQ_ID);
5880 } else
5881 disable_irq(MT_NFI_IRQ_ID);
5882
5883 nand_release_device(mtd);
5884 }
5885 break;
5886 case 'P': // Reset Performance monitor counter
5887 #ifdef NAND_PFM
5888 /* Reset values */
5889 g_PFM_R = 0;
5890 g_PFM_W = 0;
5891 g_PFM_E = 0;
5892 g_PFM_RD = 0;
5893 g_PFM_WD = 0;
5894 g_kCMD.pureReadOOBNum = 0;
5895 #endif
5896 break;
5897 case 'R': // Reset NFI performance log
5898 #if CFG_PERFLOG_DEBUG
5899 g_NandPerfLog.ReadPageCount = 0;
5900 g_NandPerfLog.ReadPageTotalTime = 0;
5901 g_NandPerfLog.ReadBusyCount = 0;
5902 g_NandPerfLog.ReadBusyTotalTime = 0;
5903 g_NandPerfLog.ReadDMACount = 0;
5904 g_NandPerfLog.ReadDMATotalTime = 0;
5905 g_NandPerfLog.ReadSubPageCount = 0;
5906 g_NandPerfLog.ReadSubPageTotalTime = 0;
5907
5908 g_NandPerfLog.WritePageCount = 0;
5909 g_NandPerfLog.WritePageTotalTime = 0;
5910 g_NandPerfLog.WriteBusyCount = 0;
5911 g_NandPerfLog.WriteBusyTotalTime = 0;
5912 g_NandPerfLog.WriteDMACount = 0;
5913 g_NandPerfLog.WriteDMATotalTime = 0;
5914
5915 g_NandPerfLog.EraseBlockCount = 0;
5916 g_NandPerfLog.EraseBlockTotalTime = 0;
5917 #endif
5918 break;
5919 case 'T': // ACCCON Setting
5920 nand_get_device(mtd, FL_READING);
5921 DRV_WriteReg32(NFI_ACCCON_REG32,value);
5922 nand_release_device(mtd);
5923 break;
5924 default:
5925 break;
5926 }
5927
5928 return len;
5929}
5930
5931#define EFUSE_GPIO_CFG ((volatile u32 *)(0xF02061c0))
5932#define EFUSE_GPIO_1_8_ENABLE 0x00000008
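/*
 * NFI_gpio_uffs() returns the 1-based index of the least significant set bit
 * (a local equivalent of ffs()); NFI_GPIO_SET_FIELD() uses it to derive the
 * shift of a field mask before or-ing in the new value. Worked example with
 * values taken from mtk_nand_gpio_init() below: field mask 0x700 has its
 * lowest set bit at position 9, so the shift is 9 - 1 = 8 and val is placed
 * into bits [10:8] of the 16-bit pad register.
 */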
5933static unsigned short NFI_gpio_uffs(unsigned short x)
5934{
5935 unsigned int r = 1;
5936
5937 if (!x)
5938 return 0;
5939
5940 if (!(x & 0xff)) {
5941 x >>= 8;
5942 r += 8;
5943 }
5944
5945 if (!(x & 0xf)) {
5946 x >>= 4;
5947 r += 4;
5948 }
5949
5950 if (!(x & 3)) {
5951 x >>= 2;
5952 r += 2;
5953 }
5954
5955 if (!(x & 1)) {
5956 x >>= 1;
5957 r += 1;
5958 }
5959
5960 return r;
5961}
5962
5963static void NFI_GPIO_SET_FIELD(U32 reg, U32 field, U32 val)
5964{
5965 unsigned short tv = (unsigned short)(*(volatile u16*)(reg));
5966 tv &= ~(field);
5967 tv |= ((val) << (NFI_gpio_uffs((unsigned short)(field)) - 1));
5968 (*(volatile u16*)(reg) = (u16)(tv));
5969}
5970
5971static void mtk_nand_gpio_init(void)
5972{
5973 NFI_GPIO_SET_FIELD(GPIO_BASE+0xc00, 0x700, 0x2); //pullup with 50Kohm ----PAD_MSDC0_CLK for 1.8v/3.3v
5974 NFI_GPIO_SET_FIELD(GPIO_BASE+0xc10, 0x700, 0x3); //pulldown with 50Kohm ----PAD_MSDC0_CMD for 1.8v/3.3v
5975 NFI_GPIO_SET_FIELD(GPIO_BASE+0xc30, 0x70, 0x3); //pulldown with 50Kohm ----PAD_MSDC0_DAT1 for 1.8v/3.3v
5976 mt_set_gpio_mode(GPIO46, GPIO_MODE_06);
5977 mt_set_gpio_mode(GPIO47, GPIO_MODE_06);
5978 mt_set_gpio_mode(GPIO48, GPIO_MODE_06);
5979 mt_set_gpio_mode(GPIO49, GPIO_MODE_06);
5980 mt_set_gpio_mode(GPIO127, GPIO_MODE_04);
5981 mt_set_gpio_mode(GPIO128, GPIO_MODE_04);
5982 mt_set_gpio_mode(GPIO129, GPIO_MODE_04);
5983 mt_set_gpio_mode(GPIO130, GPIO_MODE_04);
5984 mt_set_gpio_mode(GPIO131, GPIO_MODE_04);
5985 mt_set_gpio_mode(GPIO132, GPIO_MODE_04);
5986 mt_set_gpio_mode(GPIO133, GPIO_MODE_04);
5987 mt_set_gpio_mode(GPIO134, GPIO_MODE_04);
5988 mt_set_gpio_mode(GPIO135, GPIO_MODE_04);
5989 mt_set_gpio_mode(GPIO136, GPIO_MODE_04);
5990 mt_set_gpio_mode(GPIO137, GPIO_MODE_05);
5991 mt_set_gpio_mode(GPIO142, GPIO_MODE_01);
5992
5993 mt_set_gpio_pull_enable(GPIO142, 1);
5994 mt_set_gpio_pull_select(GPIO142, 1);
5995
5996 if(!( (*EFUSE_GPIO_CFG)&EFUSE_GPIO_1_8_ENABLE)) //3.3v
5997 {
5998 printk("3.3V\n");
5999 NFI_GPIO_SET_FIELD(GPIO_BASE+0xd70, 0xf, 0x0a); /* TDSEL change value to 0x0a*/
6000 NFI_GPIO_SET_FIELD(GPIO_BASE+0xd70, 0x3f0, 0x0c); /* RDSEL change value to 0x0c*/
6001
6002 NFI_GPIO_SET_FIELD(GPIO_BASE+0xc60, 0xf, 0x0a); /* TDSEL change value to 0x0a*/
6003 NFI_GPIO_SET_FIELD(GPIO_BASE+0xc60, 0x3f0, 0x0c); /* RDSEL change value to 0x0c*/
6004 }
6005 else //1.8v
6006 {
6007 printk("1.8V\n");
6008 NFI_GPIO_SET_FIELD(GPIO_BASE+0xd70, 0xf, 0x0a); /* TDSEL change value to 0x0a*/
 6009 NFI_GPIO_SET_FIELD(GPIO_BASE+0xd70, 0x3f0, 0x00); /* RDSEL change value to 0x00*/
6010
6011 NFI_GPIO_SET_FIELD(GPIO_BASE+0xc60, 0xf, 0x0a); /* TDSEL change value to 0x0a*/
 6012 NFI_GPIO_SET_FIELD(GPIO_BASE+0xc60, 0x3f0, 0x00); /* RDSEL change value to 0x00*/
6013 }
6014 NFI_GPIO_SET_FIELD(GPIO_BASE+0xc00, 0x7, 0x3); //set CLK driving more than 4mA default:0x3
6015 NFI_GPIO_SET_FIELD(GPIO_BASE+0xc10, 0x7, 0x3); //set CMD driving more than 4mA
6016 NFI_GPIO_SET_FIELD(GPIO_BASE+0xc20, 0x7, 0x3); //set DAT driving more than 4mA
6017 NFI_GPIO_SET_FIELD(GPIO_BASE+0xb50, 0x7, 0x3); //set NFI_PAD driving more than 4mA
6018 DRV_WriteReg32(GPIO_BASE+0xe20, DRV_Reg32(GPIO_BASE+0xe20) | 0x5 | (0x5 << 12));//NFI_BIAS_CTRL, temp solution
6019 //DRV_WriteReg32(GPIO_BASE+0x180, 0x7FFF);
6020 //DRV_WriteReg32(GPIO_BASE+0x280, 0x7FDF);
6021}
6022
6023
6024/******************************************************************************
6025 * mtk_nand_probe
6026 *
6027 * DESCRIPTION:
6028 * Probe the NAND controller and register the MTD device !
6029 *
6030 * PARAMETERS:
6031 * struct platform_device *pdev : device structure
6032 *
6033 * RETURNS:
6034 * 0 : Success
6035 *
6036 * NOTES:
6037 * None
6038 *
6039 ******************************************************************************/
6040#define KERNEL_NAND_UNIT_TEST 0
6041#define NAND_READ_PERFORMANCE 0
6042#if KERNEL_NAND_UNIT_TEST
6043int mtk_nand_unit_test(struct nand_chip *nand_chip, struct mtd_info *mtd)
6044{
 6045 MSG(INIT, "Begin kernel NAND unit test ...\n");
6046 int err = 0;
6047 int patternbuff[128] = {
6048 0x0103D901, 0xFF1802DF, 0x01200400, 0x00000021, 0x02040122, 0x02010122, 0x03020407, 0x1A050103,
6049 0x00020F1B, 0x08C0C0A1, 0x01550800, 0x201B0AC1, 0x41990155, 0x64F0FFFF, 0x201B0C82, 0x4118EA61,
6050 0xF00107F6, 0x0301EE1B, 0x0C834118, 0xEA617001, 0x07760301, 0xEE151405, 0x00202020, 0x20202020,
6051 0x00202020, 0x2000302E, 0x3000FF14, 0x00FF0000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6052 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6053 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6054 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6055 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6056 0x01D90301, 0xDF0218FF, 0x00042001, 0x21000000, 0x22010402, 0x22010102, 0x07040203, 0x0301051A,
6057 0x1B0F0200, 0xA1C0C008, 0x00085501, 0xC10A1B20, 0x55019941, 0xFFFFF064, 0x820C1B20, 0x61EA1841,
6058 0xF60701F0, 0x1BEE0103, 0x1841830C, 0x017061EA, 0x01037607, 0x051415EE, 0x20202000, 0x20202020,
6059 0x20202000, 0x2E300020, 0x14FF0030, 0x0000FF00, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6060 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6061 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6062 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6063 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
6064 };
6065 u32 j, k, p = g_block_size/g_page_size;
6066 printk("[P] %x\n", p);
6067 struct gFeatureSet *feature_set = &(devinfo.feature_set.FeatureSet);
6068 u32 val = 0x05, TOTAL=1000;
6069 for (j = 0x400; j< 0x7A0; j++)
6070 {
6071 memset(local_buffer, 0x00, 8192);
6072 mtk_nand_read_page(mtd, nand_chip, local_buffer, j*p);
6073 MSG(INIT,"[1]0x%x %x %x %x\n", *(int *)local_buffer, *((int *)local_buffer+1), *((int *)local_buffer+2), *((int *)local_buffer+3));
6074 mtk_nand_erase(mtd, j*p);
6075 memset(local_buffer, 0x00, 8192);
6076 if(mtk_nand_read_page(mtd, nand_chip, local_buffer, j*p))
6077 printk("Read page 0x%x fail!\n", j*p);
6078 MSG(INIT,"[2]0x%x %x %x %x\n", *(int *)local_buffer, *((int *)local_buffer+1), *((int *)local_buffer+2), *((int *)local_buffer+3));
6079 if (mtk_nand_block_bad(mtd, j*g_block_size, 0))
6080 {
6081 printk("Bad block at %x\n", j);
6082 continue;
6083 }
6084 for (k = 0; k < p; k++)
6085 {
6086 if(mtk_nand_write_page(mtd, nand_chip,(u8 *)patternbuff, j*p+k, 0, 0))
6087 printk("Write page 0x%x fail!\n", j*p+k);
6088 #if 1
6089 }
6090 TOTAL=1000;
6091 do{
6092 for (k = 0; k < p; k++)
6093 {
6094 #endif
6095 memset(local_buffer, 0x00, g_page_size);
6096 if(mtk_nand_read_page(mtd, nand_chip, local_buffer, j*p+k))
6097 printk("Read page 0x%x fail!\n", j*p+k);
6098 MSG(INIT,"[3]0x%x %x %x %x\n", *(int *)local_buffer, *((int *)local_buffer+1), *((int *)local_buffer+2), *((int *)local_buffer+3));
6099 if(memcmp((u8 *)patternbuff, local_buffer, 128*4))
6100 {
6101 MSG(INIT, "[KERNEL_NAND_UNIT_TEST] compare fail!\n");
6102 err = -1;
6103 while(1);
6104 }else
6105 {
6106 TOTAL--;
6107 MSG(INIT, "[KERNEL_NAND_UNIT_TEST] compare OK!\n");
6108 }
6109 }
6110 }while(TOTAL);
6111 #if 0
6112 mtk_nand_SetFeature(mtd, (u16) feature_set->sfeatureCmd, \
6113 feature_set->Async_timing.address, (u8 *)&val,\
6114 sizeof(feature_set->Async_timing.feature));
6115 mtk_nand_GetFeature(mtd, feature_set->gfeatureCmd, \
6116 feature_set->Async_timing.address, (u8 *)&val,4);
6117 printk("[ASYNC Interface]0x%X\n", val);
6118 err = mtk_nand_interface_config(mtd);
6119 MSG(INIT, "[nand_interface_config] %d\n",err);
6120 #endif
6121 }
6122 return err;
6123}
6124#endif
6125
6126#if CFG_2CS_NAND
6127//#define CHIP_ADDRESS (0x100000)
6128static int mtk_nand_cs_check(struct mtd_info *mtd, u8 *id, u16 cs)
6129{
6130 u8 ids[NAND_MAX_ID];
6131 int i = 0;
6132 //if(devinfo.ttarget == TTYPE_2DIE)
6133 //{
6134 // MSG(INIT,"2 Die Flash\n");
6135 // g_bTricky_CS = TRUE;
6136 // return 0;
6137 //}
6138 DRV_WriteReg16(NFI_CSEL_REG16, cs);
6139 mtk_nand_command_bp(mtd, NAND_CMD_READID, 0, -1);
6140 for(i=0;i<NAND_MAX_ID;i++)
6141 {
6142 ids[i]=mtk_nand_read_byte(mtd);
6143 if(ids[i] != id[i])
6144 {
 6145 MSG(INIT, "NAND cs[%d] not supported (%d,%x)\n", cs, i, ids[i]);
6146 DRV_WriteReg16(NFI_CSEL_REG16, NFI_DEFAULT_CS);
6147
6148 return 0;
6149 }
6150 }
6151 DRV_WriteReg16(NFI_CSEL_REG16, NFI_DEFAULT_CS);
6152 return 1;
6153}
6154
6155static u32 mtk_nand_cs_on(struct nand_chip *nand_chip, u16 cs, u32 page)
6156{
6157 u32 cs_page = page / g_nanddie_pages;
6158 if(cs_page)
6159 {
6160 DRV_WriteReg16(NFI_CSEL_REG16, cs);
6161 //if(devinfo.ttarget == TTYPE_2DIE)
6162 // return page;//return (page | CHIP_ADDRESS);
6163 return (page - g_nanddie_pages);
6164 }
6165 DRV_WriteReg16(NFI_CSEL_REG16, NFI_DEFAULT_CS);
6166 return page;
6167}
6168
6169#else
6170
6171#define mtk_nand_cs_check(mtd, id, cs) (1)
6172#define mtk_nand_cs_on(nand_chip, cs, page) (page)
6173#endif
6174
6175static int mtk_nand_probe(struct platform_device *pdev)
6176{
6177
6178 struct mtk_nand_host_hw *hw;
6179 struct mtd_info *mtd;
6180 struct nand_chip *nand_chip;
6181 struct resource *res = pdev->resource;
6182 int err = 0;
6183 u8 id[NAND_MAX_ID];
6184 int i;
6185 u32 sector_size = NAND_SECTOR_SIZE;
6186#if CFG_COMBO_NAND
6187 int bmt_sz = 0;
6188#endif
6189
6190 #ifdef MTK_PMIC_MT6397
6191 hwPowerOn(MT65XX_POWER_LDO_VMCH, VOL_3300, "NFI");
6192 #else
6193 hwPowerOn(MT6323_POWER_LDO_VMCH, VOL_3300, "NFI");
6194 #endif
6195
6196 hw = (struct mtk_nand_host_hw *)pdev->dev.platform_data;
6197 BUG_ON(!hw);
6198
6199 if (pdev->num_resources != 4 || res[0].flags != IORESOURCE_MEM || res[1].flags != IORESOURCE_MEM || res[2].flags != IORESOURCE_IRQ || res[3].flags != IORESOURCE_IRQ)
6200 {
6201 MSG(INIT, "%s: invalid resource type\n", __FUNCTION__);
6202 return -ENODEV;
6203 }
6204
6205 /* Request IO memory */
6206 if (!request_mem_region(res[0].start, res[0].end - res[0].start + 1, pdev->name))
6207 {
6208 return -EBUSY;
6209 }
6210 if (!request_mem_region(res[1].start, res[1].end - res[1].start + 1, pdev->name))
6211 {
6212 return -EBUSY;
6213 }
6214
6215 /* Allocate memory for the device structure (and zero it) */
6216 host = kzalloc(sizeof(struct mtk_nand_host), GFP_KERNEL);
6217 if (!host)
6218 {
6219 MSG(INIT, "mtk_nand: failed to allocate device structure.\n");
6220 return -ENOMEM;
6221 }
6222
 6223 /* Set up the 16 byte aligned buffer pointers */
6224 local_buffer_16_align = local_buffer;
6225 temp_buffer_16_align = temp_buffer;
6226 //printk(KERN_INFO "Allocate 16 byte aligned buffer: %p\n", local_buffer_16_align);
6227
6228 host->hw = hw;
6229 PL_TIME_PROG(10);
6230 PL_TIME_ERASE(10);
6231 PL_TIME_PROG_WDT_SET(1);
6232 PL_TIME_ERASE_WDT_SET(1);
6233
6234 /* init mtd data structure */
6235 nand_chip = &host->nand_chip;
6236 nand_chip->priv = host; /* link the private data structures */
6237
6238 mtd = &host->mtd;
6239 mtd->priv = nand_chip;
6240 mtd->owner = THIS_MODULE;
6241 mtd->name = "MTK-Nand";
6242 mtd->eraseregions = host->erase_region;
6243
6244 hw->nand_ecc_mode = NAND_ECC_HW;
6245
6246 /* Set address of NAND IO lines */
6247 nand_chip->IO_ADDR_R = (void __iomem *)NFI_DATAR_REG32;
6248 nand_chip->IO_ADDR_W = (void __iomem *)NFI_DATAW_REG32;
6249 nand_chip->chip_delay = 20; /* 20us command delay time */
6250 nand_chip->ecc.mode = hw->nand_ecc_mode; /* enable ECC */
6251
6252 nand_chip->read_byte = mtk_nand_read_byte;
6253 nand_chip->read_buf = mtk_nand_read_buf;
6254 nand_chip->write_buf = mtk_nand_write_buf;
6255#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
6256 nand_chip->verify_buf = mtk_nand_verify_buf;
6257#endif
6258 nand_chip->select_chip = mtk_nand_select_chip;
6259 nand_chip->dev_ready = mtk_nand_dev_ready;
6260 nand_chip->cmdfunc = mtk_nand_command_bp;
6261 nand_chip->ecc.read_page = mtk_nand_read_page_hwecc;
6262 nand_chip->ecc.write_page = mtk_nand_write_page_hwecc;
6263
6264 nand_chip->ecc.layout = &nand_oob_64;
6265 nand_chip->ecc.size = hw->nand_ecc_size; //2048
6266 nand_chip->ecc.bytes = hw->nand_ecc_bytes; //32
6267
6268 nand_chip->options = NAND_SKIP_BBTSCAN;
6269
6270 // For BMT, we need to revise driver architecture
6271 nand_chip->write_page = mtk_nand_write_page;
6272 nand_chip->read_page = mtk_nand_read_page;
6273 nand_chip->read_subpage = mtk_nand_read_subpage;
6274 nand_chip->ecc.write_oob = mtk_nand_write_oob;
6275 nand_chip->ecc.read_oob = mtk_nand_read_oob;
6276 nand_chip->block_markbad = mtk_nand_block_markbad; // need to add nand_get_device()/nand_release_device().
6277 nand_chip->erase = mtk_nand_erase;
6278 nand_chip->block_bad = mtk_nand_block_bad;
6279 nand_chip->init_size = mtk_nand_init_size;
6280#if CFG_FPGA_PLATFORM
6281 MSG(INIT, "[FPGA Dummy]Enable NFI and NFIECC Clock\n");
6282#else
6283 //MSG(INIT, "[NAND]Enable NFI and NFIECC Clock\n");
6284 nand_enable_clock();
6285#endif
6286 mtk_nand_gpio_init();
6287 mtk_nand_init_hw(host);
6288 /* Select the device */
6289 nand_chip->select_chip(mtd, NFI_DEFAULT_CS);
6290
6291 /*
6292 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
6293 * after power-up
6294 */
6295 nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
6296
6297 /* Send the command for reading device ID */
6298 nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
6299
6300 for(i=0;i<NAND_MAX_ID;i++){
6301 id[i]=nand_chip->read_byte(mtd);
6302 }
6303 manu_id = id[0];
6304 dev_id = id[1];
6305
6306 if (!get_device_info(id,&devinfo))
6307 {
 6308 MSG(INIT, "This device is not supported!\r\n");
6309 }
6310#if CFG_2CS_NAND
6311 if (mtk_nand_cs_check(mtd, id, NFI_TRICKY_CS))
6312 {
6313 MSG(INIT, "Twins Nand\n");
6314 g_bTricky_CS = TRUE;
6315 g_b2Die_CS = TRUE;
6316 }
6317#endif
6318
6319 if (devinfo.pagesize == 16384)
6320 {
6321 nand_chip->ecc.layout = &nand_oob_128;
6322 hw->nand_ecc_size = 16384;
6323 } else if (devinfo.pagesize == 8192)
6324 {
6325 nand_chip->ecc.layout = &nand_oob_128;
6326 hw->nand_ecc_size = 8192;
6327 } else if (devinfo.pagesize == 4096)
6328 {
6329 nand_chip->ecc.layout = &nand_oob_128;
6330 hw->nand_ecc_size = 4096;
6331 } else if (devinfo.pagesize == 2048)
6332 {
6333 nand_chip->ecc.layout = &nand_oob_64;
6334 hw->nand_ecc_size = 2048;
6335 } else if (devinfo.pagesize == 512)
6336 {
6337 nand_chip->ecc.layout = &nand_oob_16;
6338 hw->nand_ecc_size = 512;
6339 }
6340 if(devinfo.sectorsize == 1024)
6341 {
6342 sector_size = 1024;
6343 hw->nand_sec_shift = 10;
6344 hw->nand_sec_size = 1024;
6345 NFI_CLN_REG32(NFI_PAGEFMT_REG16, PAGEFMT_SECTOR_SEL);
6346 }
6347 if(devinfo.pagesize <= 4096)
6348 {
6349 nand_chip->ecc.layout->eccbytes = devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/sector_size);
6350 hw->nand_ecc_bytes = nand_chip->ecc.layout->eccbytes;
 6351 // Modify to fit device characteristics
6352 nand_chip->ecc.size = hw->nand_ecc_size;
6353 nand_chip->ecc.bytes = hw->nand_ecc_bytes;
6354 }
6355 else
6356 {
6357 nand_chip->ecc.layout->eccbytes = 64;//devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/sector_size);
6358 hw->nand_ecc_bytes = nand_chip->ecc.layout->eccbytes;
 6359 // Modify to fit device characteristics
6360 nand_chip->ecc.size = hw->nand_ecc_size;
6361 nand_chip->ecc.bytes = hw->nand_ecc_bytes;
6362 }
6363 nand_chip->subpagesize = devinfo.sectorsize;
6364 nand_chip->subpage_size = devinfo.sectorsize;
6365
6366 for(i=0;i<nand_chip->ecc.layout->eccbytes;i++){
6367 nand_chip->ecc.layout->eccpos[i]=OOB_AVAI_PER_SECTOR*(devinfo.pagesize/sector_size)+i;
6368 }
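 /*
 * Summary of the layout selection above: the page size picks the OOB layout
 * (nand_oob_16/64/128) and hw->nand_ecc_size; a 1KB sector clears
 * PAGEFMT_SECTOR_SEL and sets nand_sec_size/shift accordingly. For pages of
 * 4KB or less, eccbytes is the spare area left after reserving
 * OOB_AVAI_PER_SECTOR free OOB bytes per sector; for larger pages a fixed
 * 64 bytes is used. eccpos then places the ECC bytes directly behind that
 * free OOB region.
 */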
6369 //MSG(INIT, "[NAND] pagesz:%d , oobsz: %d,eccbytes: %d\n",
6370 // devinfo.pagesize, sizeof(g_kCMD.au1OOB),nand_chip->ecc.layout->eccbytes);
6371
6372
6373 //MSG(INIT, "Support this Device in MTK table! %x \r\n", id);
6374#if CFG_RANDOMIZER
6375 if(devinfo.vendor != VEND_NONE)
6376 {
6377 if((*EFUSE_RANDOM_CFG)&EFUSE_RANDOM_ENABLE)
6378 {
6379 MSG(INIT, "[NAND]EFUSE RANDOM CFG is ON\n");
6380 use_randomizer = TRUE;
6381 pre_randomizer = TRUE;
6382 }
6383 else
6384 {
6385 MSG(INIT, "[NAND]EFUSE RANDOM CFG is OFF\n");
6386 use_randomizer = FALSE;
6387 pre_randomizer = FALSE;
6388 }
6389 }
6390#endif
6391
6392 if((devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM) || (devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX))
6393 HYNIX_RR_TABLE_READ(&devinfo);
6394
6395 hw->nfi_bus_width = devinfo.iowidth;
6396#if 1
6397 if(devinfo.vendor == VEND_MICRON)
6398 {
6399 if(devinfo.feature_set.FeatureSet.Async_timing.feature != 0xFF)
6400 {
6401 struct gFeatureSet *feature_set = &(devinfo.feature_set.FeatureSet);
6402 //u32 val = 0;
6403 mtk_nand_SetFeature(mtd, (u16) feature_set->sfeatureCmd, \
6404 feature_set->Async_timing.address, (u8 *)(&feature_set->Async_timing.feature),\
6405 sizeof(feature_set->Async_timing.feature));
6406 //mtk_nand_GetFeature(mtd, feature_set->gfeatureCmd, \
6407 //feature_set->Async_timing.address, (u8 *)(&val),4);
6408 //printk("[ASYNC Interface]0x%X\n", val);
6409 #if CFG_2CS_NAND
6410 if(g_bTricky_CS)
6411 {
6412 DRV_WriteReg16(NFI_CSEL_REG16, NFI_TRICKY_CS);
6413 mtk_nand_SetFeature(mtd, (u16) feature_set->sfeatureCmd, \
6414 feature_set->Async_timing.address, (u8 *)(&feature_set->Async_timing.feature),\
6415 sizeof(feature_set->Async_timing.feature));
6416 DRV_WriteReg16(NFI_CSEL_REG16, NFI_DEFAULT_CS);
6417 }
6418 #endif
6419 }
6420 }
6421#endif
6422 //MSG(INIT, "AHB Clock(0x%x) ",DRV_Reg32(PERICFG_BASE+0x5C));
6423 //DRV_WriteReg32(PERICFG_BASE+0x5C, 0x1);
6424 //MSG(INIT, "AHB Clock(0x%x)",DRV_Reg32(PERICFG_BASE+0x5C));
6425 DRV_WriteReg32(NFI_ACCCON_REG32, devinfo.timmingsetting);
6426 //MSG(INIT, "Kernel Nand Timing:0x%x!\n", DRV_Reg32(NFI_ACCCON_REG32));
6427
6428 /* 16-bit bus width */
6429 if (hw->nfi_bus_width == 16)
6430 {
6431 MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME);
6432 nand_chip->options |= NAND_BUSWIDTH_16;
6433 }
6434 mt_irq_set_sens(MT_NFI_IRQ_ID, MT65xx_LEVEL_SENSITIVE);
6435 mt_irq_set_polarity(MT_NFI_IRQ_ID, MT65xx_POLARITY_LOW);
6436 err = request_irq(MT_NFI_IRQ_ID, mtk_nand_irq_handler, IRQF_DISABLED, "mtk-nand", NULL);
6437
6438 if (0 != err)
6439 {
6440 MSG(INIT, "%s : Request IRQ fail: err = %d\n", MODULE_NAME, err);
6441 goto out;
6442 }
6443
6444 if (g_i4Interrupt)
6445 enable_irq(MT_NFI_IRQ_ID);
6446 else
6447 disable_irq(MT_NFI_IRQ_ID);
6448
6449#if 0
6450 if (devinfo.advancedmode & CACHE_READ)
6451 {
6452 nand_chip->ecc.read_multi_page_cache = NULL;
6453 // nand_chip->ecc.read_multi_page_cache = mtk_nand_read_multi_page_cache;
6454 // MSG(INIT, "Device %x support cache read \r\n",id);
6455 } else
6456 nand_chip->ecc.read_multi_page_cache = NULL;
6457#endif
6458 mtd->oobsize = devinfo.sparesize;
 6459 /* Scan to find existence of the device */
6460 if (nand_scan(mtd, hw->nfi_cs_num))
6461 {
6462 MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME);
6463 err = -ENXIO;
6464 goto out;
6465 }
6466
6467 g_page_size = mtd->writesize;
6468 g_block_size = devinfo.blocksize << 10;
6469 PAGES_PER_BLOCK = (u32)(g_block_size / g_page_size);
6470 //MSG(INIT, "g_page_size(%d) g_block_size(%d)\n",g_page_size, g_block_size);
6471#if CFG_2CS_NAND
6472 g_nanddie_pages = (u32)(nand_chip->chipsize >> nand_chip->page_shift);
6473 //if(devinfo.ttarget == TTYPE_2DIE)
6474 //{
6475 // g_nanddie_pages = g_nanddie_pages / 2;
6476 //}
6477 if(g_b2Die_CS)
6478 {
6479 nand_chip->chipsize <<= 1;
6480 //MSG(INIT, "[Bean]%dMB\n", (u32)(nand_chip->chipsize/1024/1024));
6481 }
6482 //MSG(INIT, "[Bean]g_nanddie_pages %x\n", g_nanddie_pages);
6483#endif
6484 #if CFG_COMBO_NAND
6485 #ifdef PART_SIZE_BMTPOOL
6486 if (PART_SIZE_BMTPOOL)
6487 {
6488 bmt_sz = (PART_SIZE_BMTPOOL) >> nand_chip->phys_erase_shift;
6489 }else
6490 #endif
6491 {
6492 bmt_sz = (int)(((u32)(nand_chip->chipsize >> nand_chip->phys_erase_shift))/100*6);
6493 }
6494 //if (manu_id == 0x45)
6495 //{
6496 // bmt_sz = bmt_sz * 2;
6497 //}
6498 #endif
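 /*
 * BMT pool sizing: when PART_SIZE_BMTPOOL is defined and non-zero, the pool
 * is that many bytes converted to erase blocks; otherwise roughly 6% of the
 * total erase blocks (chipsize >> phys_erase_shift, divided by 100 before
 * multiplying by 6, so the result rounds down). The pool is later hidden
 * from the reported chip size before init_bmt() is called.
 */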
6499 platform_set_drvdata(pdev, host);
6500
6501 if (hw->nfi_bus_width == 16)
6502 {
6503 NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
6504 }
6505
6506 nand_chip->select_chip(mtd, 0);
6507 #if defined(MTK_COMBO_NAND_SUPPORT)
6508 #if CFG_COMBO_NAND
6509 nand_chip->chipsize -= (bmt_sz * g_block_size);
6510 #else
6511 nand_chip->chipsize -= (PART_SIZE_BMTPOOL);
6512 #endif
6513 //#if CFG_2CS_NAND
6514 //if(g_b2Die_CS)
6515 //{
6516 // nand_chip->chipsize -= (PART_SIZE_BMTPOOL); // if 2CS nand need cut down again
6517 //}
6518 //#endif
6519 #else
6520 nand_chip->chipsize -= (BMT_POOL_SIZE) << nand_chip->phys_erase_shift;
6521 #endif
6522 mtd->size = nand_chip->chipsize;
6523#if NAND_READ_PERFORMANCE
6524 struct timeval stimer,etimer;
6525 do_gettimeofday(&stimer);
6526 for (i = 256; i < 512; i++)
6527 {
6528 mtk_nand_read_page(mtd, nand_chip, local_buffer, i);
6529 MSG(INIT,"[%d]0x%x %x %x %x\n",i, *(int *)local_buffer, *((int *)local_buffer+1), *((int *)local_buffer+2), *((int *)local_buffer+3));
 6530 MSG(INIT,"[%d]0x%x %x %x %x\n",i, *((int *)local_buffer+4), *((int *)local_buffer+5), *((int *)local_buffer+6), *((int *)local_buffer+7));
 6531 MSG(INIT,"[%d]0x%x %x %x %x\n",i, *((int *)local_buffer+8), *((int *)local_buffer+9), *((int *)local_buffer+10), *((int *)local_buffer+11));
 6532 MSG(INIT,"[%d]0x%x %x %x %x\n",i, *((int *)local_buffer+12), *((int *)local_buffer+13), *((int *)local_buffer+14), *((int *)local_buffer+15));
6533 }
6534 do_gettimeofday(&etimer);
6535 printk("[NAND Read Perf.Test] %ld MB/s\n", (g_page_size*256)/Cal_timediff(&etimer,&stimer));
6536#endif
6537
6538 if(devinfo.vendor != VEND_NONE)
6539 {
6540 err = mtk_nand_interface_config(mtd);
6541 #if CFG_2CS_NAND
6542 if(g_bTricky_CS)
6543 {
6544 DRV_WriteReg16(NFI_CSEL_REG16, NFI_TRICKY_CS);
6545 err = mtk_nand_interface_config(mtd);
6546 DRV_WriteReg16(NFI_CSEL_REG16, NFI_DEFAULT_CS);
6547 }
6548 #endif
6549 //MSG(INIT, "[nand_interface_config] %d\n",err);
6550 //u32 regp;
6551 //for (regp = 0xF0206000; regp <= 0xF020631C; regp+=4)
6552 // printk("[%08X]0x%08X\n", regp, DRV_Reg32(regp));
6553 #if NAND_READ_PERFORMANCE
6554 do_gettimeofday(&stimer);
6555 for (i = 256; i < 512; i++)
6556 {
6557 mtk_nand_read_page(mtd, nand_chip, local_buffer, i);
6558 MSG(INIT,"[%d]0x%x %x %x %x\n",i, *(int *)local_buffer, *((int *)local_buffer+1), *((int *)local_buffer+2), *((int *)local_buffer+3));
 6559 MSG(INIT,"[%d]0x%x %x %x %x\n",i, *((int *)local_buffer+4), *((int *)local_buffer+5), *((int *)local_buffer+6), *((int *)local_buffer+7));
 6560 MSG(INIT,"[%d]0x%x %x %x %x\n",i, *((int *)local_buffer+8), *((int *)local_buffer+9), *((int *)local_buffer+10), *((int *)local_buffer+11));
 6561 MSG(INIT,"[%d]0x%x %x %x %x\n",i, *((int *)local_buffer+12), *((int *)local_buffer+13), *((int *)local_buffer+14), *((int *)local_buffer+15));
6562 }
6563 do_gettimeofday(&etimer);
6564 printk("[NAND Read Perf.Test] %d MB/s\n", (g_page_size*256)/Cal_timediff(&etimer,&stimer));
6565 while(1);
6566 #endif
6567 }
6568
6569 if (!g_bmt)
6570 {
6571 #if defined(MTK_COMBO_NAND_SUPPORT)
6572 #if CFG_COMBO_NAND
6573 if (!(g_bmt = init_bmt(nand_chip, bmt_sz)))
6574 #else
6575 if (!(g_bmt = init_bmt(nand_chip, ((PART_SIZE_BMTPOOL) >> nand_chip->phys_erase_shift))))
6576 #endif
6577 #else
6578 if (!(g_bmt = init_bmt(nand_chip, BMT_POOL_SIZE)))
6579 #endif
6580 {
6581 MSG(INIT, "Error: init bmt failed\n");
6582 return 0;
6583 }
6584 }
6585
6586 nand_chip->chipsize -= (PMT_POOL_SIZE) << nand_chip->phys_erase_shift;
6587 mtd->size = nand_chip->chipsize;
6588#if KERNEL_NAND_UNIT_TEST
6589 err = mtk_nand_unit_test(nand_chip, mtd);
6590 if (err == 0)
6591 {
6592 printk("Thanks to GOD, UNIT Test OK!\n");
6593 }
6594#endif
6595#ifdef PMT
6596 part_init_pmt(mtd, (u8 *) & g_exist_Partition[0]);
6597 err = mtd_device_register(mtd, g_exist_Partition, part_num);
6598#else
6599 err = mtd_device_register(mtd, g_pasStatic_Partition, part_num);
6600#endif
6601
6602#ifdef _MTK_NAND_DUMMY_DRIVER_
6603 dummy_driver_debug = 0;
6604#endif
6605
6606 /* Successfully!! */
6607 if (!err)
6608 {
6609 //MSG(INIT, "[mtk_nand] probe successfully!\n");
6610 nand_disable_clock();
6611 return err;
6612 }
6613
6614 /* Fail!! */
6615 out:
6616 MSG(INIT, "[NFI] mtk_nand_probe fail, err = %d!\n", err);
6617 nand_release(mtd);
6618 platform_set_drvdata(pdev, NULL);
6619 kfree(host);
6620 nand_disable_clock();
6621 return err;
6622}
6623/******************************************************************************
6624 * mtk_nand_suspend
6625 *
6626 * DESCRIPTION:
6627 * Suspend the nand device!
6628 *
6629 * PARAMETERS:
6630 * struct platform_device *pdev : device structure
6631 *
6632 * RETURNS:
6633 * 0 : Success
6634 *
6635 * NOTES:
6636 * None
6637 *
6638 ******************************************************************************/
6639static int mtk_nand_suspend(struct platform_device *pdev, pm_message_t state)
6640{
6641 struct mtk_nand_host *host = platform_get_drvdata(pdev);
6642// struct mtd_info *mtd = &host->mtd;
6643 // backup register
6644 #ifdef CONFIG_PM
6645
6646 if(host->saved_para.suspend_flag==0)
6647 {
6648 nand_enable_clock();
6649 // Save NFI register
6650 host->saved_para.sNFI_CNFG_REG16 = DRV_Reg16(NFI_CNFG_REG16);
6651 host->saved_para.sNFI_PAGEFMT_REG16 = DRV_Reg16(NFI_PAGEFMT_REG16);
6652 host->saved_para.sNFI_CON_REG16 = DRV_Reg32(NFI_CON_REG16);
6653 host->saved_para.sNFI_ACCCON_REG32 = DRV_Reg32(NFI_ACCCON_REG32);
6654 host->saved_para.sNFI_INTR_EN_REG16 = DRV_Reg16(NFI_INTR_EN_REG16);
6655 host->saved_para.sNFI_IOCON_REG16 = DRV_Reg16(NFI_IOCON_REG16);
6656 host->saved_para.sNFI_CSEL_REG16 = DRV_Reg16(NFI_CSEL_REG16);
6657 host->saved_para.sNFI_DEBUG_CON1_REG16 = DRV_Reg16(NFI_DEBUG_CON1_REG16);
6658
6659 // save ECC register
6660 host->saved_para.sECC_ENCCNFG_REG32 = DRV_Reg32(ECC_ENCCNFG_REG32);
6661// host->saved_para.sECC_FDMADDR_REG32 = DRV_Reg32(ECC_FDMADDR_REG32);
6662 host->saved_para.sECC_DECCNFG_REG32 = DRV_Reg32(ECC_DECCNFG_REG32);
6663 // for sync mode
6664 if (g_bSyncOrToggle)
6665 {
6666 host->saved_para.sNFI_DLYCTRL_REG32 = DRV_Reg32(NFI_DLYCTRL_REG32);
6667 host->saved_para.sPERI_NFI_MAC_CTRL = DRV_Reg32(PERI_NFI_MAC_CTRL);
6668 host->saved_para.sNFI_NAND_TYPE_CNFG_REG32 = DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32);
6669 host->saved_para.sNFI_ACCCON1_REG32 = DRV_Reg32(NFI_ACCCON1_REG3);
6670 }
6671 #ifdef MTK_PMIC_MT6397
6672 hwPowerDown(MT65XX_POWER_LDO_VMCH, "NFI");
6673 #else
6674 hwPowerDown(MT6323_POWER_LDO_VMCH, "NFI");
6675 #endif
6676 nand_disable_clock();
6677 host->saved_para.suspend_flag=1;
6678 }
6679 else
6680 {
6681 MSG(INIT, "[NFI] Suspend twice !\n");
6682 }
6683 #endif
6684
6685 MSG(INIT, "[NFI] Suspend !\n");
6686 return 0;
6687}
6688
6689/******************************************************************************
6690 * mtk_nand_resume
6691 *
6692 * DESCRIPTION:
6693 * Resume the nand device!
6694 *
6695 * PARAMETERS:
6696 * struct platform_device *pdev : device structure
6697 *
6698 * RETURNS:
6699 * 0 : Success
6700 *
6701 * NOTES:
6702 * None
6703 *
6704 ******************************************************************************/
6705static int mtk_nand_resume(struct platform_device *pdev)
6706{
6707 struct mtk_nand_host *host = platform_get_drvdata(pdev);
6708 //struct mtd_info *mtd = &host->mtd; //for test
6709// struct nand_chip *chip = mtd->priv;
6710 //struct gFeatureSet *feature_set = &(devinfo.feature_set.FeatureSet); //for test
6711 //int val = -1; // for test
6712 //[BUGFIX]-Add-BEGIN by SCDTABLET.(lilin.liu@jrdcom.com), PR981151,981152 04/24/2015
6713 u32 timeout = 2000;
6714 bool ret = true;
6715 //[BUGFIX]-Add-END by SCDTABLET.(lilin.liu@jrdcom.com)
6716
6717#ifdef CONFIG_PM
6718
6719 if(host->saved_para.suspend_flag==1)
6720 {
6721 nand_enable_clock();
6722 // restore NFI register
6723 #ifdef MTK_PMIC_MT6397
6724 hwPowerOn(MT65XX_POWER_LDO_VMCH, VOL_3300, "NFI");
6725 #else
6726 hwPowerOn(MT6323_POWER_LDO_VMCH, VOL_3300, "NFI");
6727 #endif
6728
6729 //[BUGFIX]-Add-BEGIN by SCDTABLET.(lilin.liu@jrdcom.com), PR981151,981152 04/24/2015
 6730 MSG(INIT, "[NFI] Resume: add an extra 1ms delay and wait for device reset ready!\n");
6731 mdelay(1);
6732 while (timeout--){
6733 ret = mtk_nand_device_reset();
6734 if(ret == true)
6735 break;
6736
6737 udelay(100); //total 200ms polling nand reset status
6738 }
6739
6740 if(ret == false){
6741 MSG(INIT, "[NFI] Resume Error, device reset failed here!\n");
6742 }
6743 //[BUGFIX]-Add-END by SCDTABLET.(lilin.liu@jrdcom.com)
6744 DRV_WriteReg16(NFI_CNFG_REG16 ,host->saved_para.sNFI_CNFG_REG16);
6745 DRV_WriteReg16(NFI_PAGEFMT_REG16 ,host->saved_para.sNFI_PAGEFMT_REG16);
6746 DRV_WriteReg32(NFI_CON_REG16 ,host->saved_para.sNFI_CON_REG16);
6747 DRV_WriteReg32(NFI_ACCCON_REG32 ,host->saved_para.sNFI_ACCCON_REG32);
6748 DRV_WriteReg16(NFI_IOCON_REG16 ,host->saved_para.sNFI_IOCON_REG16);
6749 DRV_WriteReg16(NFI_CSEL_REG16 ,host->saved_para.sNFI_CSEL_REG16);
6750 DRV_WriteReg16(NFI_DEBUG_CON1_REG16 ,host->saved_para.sNFI_DEBUG_CON1_REG16);
6751
6752 // restore ECC register
6753 DRV_WriteReg32(ECC_ENCCNFG_REG32 ,host->saved_para.sECC_ENCCNFG_REG32);
6754// DRV_WriteReg32(ECC_FDMADDR_REG32 ,host->saved_para.sECC_FDMADDR_REG32);
6755 DRV_WriteReg32(ECC_DECCNFG_REG32 ,host->saved_para.sECC_DECCNFG_REG32);
6756
6757 // Reset NFI and ECC state machine
6758 /* Reset the state machine and data FIFO, because flushing FIFO */
6759 (void)mtk_nand_reset();
6760 // Reset ECC
6761 DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
6762 while (!DRV_Reg16(ECC_DECIDLE_REG16));
6763
6764 DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
6765 while (!DRV_Reg32(ECC_ENCIDLE_REG32));
6766
6767
 6768 /* Initialize interrupt: clear pending interrupts (read to clear). */
6769 DRV_Reg16(NFI_INTR_REG16);
6770
6771 DRV_WriteReg16(NFI_INTR_EN_REG16 ,host->saved_para.sNFI_INTR_EN_REG16);
6772
6773 //mtk_nand_interface_config(&host->mtd);
6774 if (g_bSyncOrToggle)
6775 {
6776 NFI_CLN_REG32(NFI_DEBUG_CON1_REG16,HWDCM_SWCON_ON);
6777 NFI_CLN_REG32(NFI_DEBUG_CON1_REG16,NFI_BYPASS);
6778 NFI_CLN_REG32(ECC_BYPASS_REG32,ECC_BYPASS);
6779 DRV_WriteReg32(PERICFG_BASE+0x5C, 0x0);
6780 NFI_SET_REG32(PERI_NFI_CLK_SOURCE_SEL, NFI_PAD_1X_CLOCK);
6781 clkmux_sel(MT_MUX_NFI2X,g_iNFI2X_CLKSRC,"NFI");
6782 DRV_WriteReg32(NFI_DLYCTRL_REG32, host->saved_para.sNFI_DLYCTRL_REG32);
6783 DRV_WriteReg32(PERI_NFI_MAC_CTRL, host->saved_para.sPERI_NFI_MAC_CTRL);
 6784 while(0 == (DRV_Reg32(NFI_STA_REG32) & STA_FLASH_MACRO_IDLE)); /* wait until the flash macro is idle */
 6785 DRV_WriteReg32(NFI_NAND_TYPE_CNFG_REG32, host->saved_para.sNFI_NAND_TYPE_CNFG_REG32);
6786 DRV_WriteReg32(NFI_ACCCON1_REG3,host->saved_para.sNFI_ACCCON1_REG32);
6787 }
6788 //mtk_nand_GetFeature(mtd, feature_set->gfeatureCmd, \
6789 //feature_set->Interface.address, (u8 *)&val,4);
6790 //MSG(POWERCTL, "[NFI] Resume feature %d!\n", val);
6791 nand_disable_clock();
6792 host->saved_para.suspend_flag = 0;
6793 }
6794 else
6795 {
6796 MSG(INIT, "[NFI] Resume twice !\n");
6797 }
6798#endif
6799 MSG(INIT, "[NFI] Resume !\n");
6800 return 0;
6801}
6802
6803/******************************************************************************
6804 * mtk_nand_remove
6805 *
6806 * DESCRIPTION:
6807 * unregister the nand device file operations !
6808 *
6809 * PARAMETERS:
6810 * struct platform_device *pdev : device structure
6811 *
6812 * RETURNS:
6813 * 0 : Success
6814 *
6815 * NOTES:
6816 * None
6817 *
6818 ******************************************************************************/
6819
6820static int mtk_nand_remove(struct platform_device *pdev)
6821{
6822 struct mtk_nand_host *host = platform_get_drvdata(pdev);
6823 struct mtd_info *mtd = &host->mtd;
6824
6825 nand_release(mtd);
6826
6827 kfree(host);
6828
6829 nand_disable_clock();
6830
6831 return 0;
6832}
6833
6834/******************************************************************************
6835 * NAND OTP operations
6836 * ***************************************************************************/
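/*
 * The Samsung OTP helpers below enter the OTP area with the vendor command
 * sequence used here (reset, then 0x30 followed by 0x65), address one of the
 * pages listed in Samsung_OTP_Page[], transfer a full page through the NFI
 * DMA path with optional HW ECC, and leave OTP mode again with a device
 * RESET (0xFF). Offsets are expressed in OTP pages, so OTPQueryLength()
 * reports SAMSUNG_OTP_PAGE_NUM * g_page_size bytes in total.
 */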
6837#if (defined(NAND_OTP_SUPPORT) && SAMSUNG_OTP_SUPPORT)
6838unsigned int samsung_OTPQueryLength(unsigned int *QLength)
6839{
6840 *QLength = SAMSUNG_OTP_PAGE_NUM * g_page_size;
6841 return 0;
6842}
6843
6844unsigned int samsung_OTPRead(unsigned int PageAddr, void *BufferPtr, void *SparePtr)
6845{
6846 struct mtd_info *mtd = &host->mtd;
6847 unsigned int rowaddr, coladdr;
6848 unsigned int u4Size = g_page_size;
6849 unsigned int timeout = 0xFFFF;
6850 unsigned int bRet;
6851 unsigned int sec_num = mtd->writesize >> host->hw->nand_sec_shift;
6852
6853 if (PageAddr >= SAMSUNG_OTP_PAGE_NUM)
6854 {
6855 return OTP_ERROR_OVERSCOPE;
6856 }
6857
6858 /* Col -> Row; LSB first */
6859 coladdr = 0x00000000;
6860 rowaddr = Samsung_OTP_Page[PageAddr];
6861
6862 MSG(OTP, "[%s]:(COLADDR) [0x%08x]/(ROWADDR)[0x%08x]\n", __func__, coladdr, rowaddr);
6863
6864 /* Power on NFI HW component. */
6865 nand_get_device(mtd, FL_READING);
6866 mtk_nand_reset();
6867 (void)mtk_nand_set_command(0x30);
6868 mtk_nand_reset();
6869 (void)mtk_nand_set_command(0x65);
6870
6871 MSG(OTP, "[%s]: Start to read data from OTP area\n", __func__);
6872
6873 if (!mtk_nand_reset())
6874 {
6875 bRet = OTP_ERROR_RESET;
6876 goto cleanup;
6877 }
6878
6879 mtk_nand_set_mode(CNFG_OP_READ);
6880 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
6881 DRV_WriteReg32(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
6882
6883 DRV_WriteReg32(NFI_STRADDR_REG32, __virt_to_phys(BufferPtr));
6884 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
6885
6886 if (g_bHwEcc)
6887 {
6888 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
6889 } else
6890 {
6891 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
6892 }
6893 mtk_nand_set_autoformat(true);
6894 if (g_bHwEcc)
6895 {
6896 ECC_Decode_Start();
6897 }
6898 if (!mtk_nand_set_command(NAND_CMD_READ0))
6899 {
6900 bRet = OTP_ERROR_BUSY;
6901 goto cleanup;
6902 }
6903
6904 if (!mtk_nand_set_address(coladdr, rowaddr, 2, 3))
6905 {
6906 bRet = OTP_ERROR_BUSY;
6907 goto cleanup;
6908 }
6909
6910 if (!mtk_nand_set_command(NAND_CMD_READSTART))
6911 {
6912 bRet = OTP_ERROR_BUSY;
6913 goto cleanup;
6914 }
6915
6916 if (!mtk_nand_status_ready(STA_NAND_BUSY))
6917 {
6918 bRet = OTP_ERROR_BUSY;
6919 goto cleanup;
6920 }
6921
6922 if (!mtk_nand_read_page_data(mtd, BufferPtr, u4Size))
6923 {
6924 bRet = OTP_ERROR_BUSY;
6925 goto cleanup;
6926 }
6927
6928 if (!mtk_nand_status_ready(STA_NAND_BUSY))
6929 {
6930 bRet = OTP_ERROR_BUSY;
6931 goto cleanup;
6932 }
6933
6934 mtk_nand_read_fdm_data(SparePtr, sec_num);
6935
6936 mtk_nand_stop_read();
6937
 6938 MSG(OTP, "[%s]: Finished reading data from OTP area\n", __func__);
6939
6940 bRet = OTP_SUCCESS;
6941
6942 cleanup:
6943
6944 mtk_nand_reset();
6945 (void)mtk_nand_set_command(0xFF);
6946 nand_release_device(mtd);
6947 return bRet;
6948}
6949
6950unsigned int samsung_OTPWrite(unsigned int PageAddr, void *BufferPtr, void *SparePtr)
6951{
6952 struct mtd_info *mtd = &host->mtd;
6953 unsigned int rowaddr, coladdr;
6954 unsigned int u4Size = g_page_size;
6955 unsigned int timeout = 0xFFFF;
6956 unsigned int bRet;
6957 unsigned int sec_num = mtd->writesize >> 9;
6958
6959 if (PageAddr >= SAMSUNG_OTP_PAGE_NUM)
6960 {
6961 return OTP_ERROR_OVERSCOPE;
6962 }
6963
6964 /* Col -> Row; LSB first */
6965 coladdr = 0x00000000;
6966 rowaddr = Samsung_OTP_Page[PageAddr];
6967
6968 MSG(OTP, "[%s]:(COLADDR) [0x%08x]/(ROWADDR)[0x%08x]\n", __func__, coladdr, rowaddr);
6969 nand_get_device(mtd, FL_READING);
6970 mtk_nand_reset();
6971 (void)mtk_nand_set_command(0x30);
6972 mtk_nand_reset();
6973 (void)mtk_nand_set_command(0x65);
6974
6975 MSG(OTP, "[%s]: Start to write data to OTP area\n", __func__);
6976
6977 if (!mtk_nand_reset())
6978 {
6979 bRet = OTP_ERROR_RESET;
6980 goto cleanup;
6981 }
6982
6983 mtk_nand_set_mode(CNFG_OP_PRGM);
6984
6985 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
6986
6987 DRV_WriteReg32(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
6988
6989 DRV_WriteReg32(NFI_STRADDR_REG32, __virt_to_phys(BufferPtr));
6990 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
6991
6992 if (g_bHwEcc)
6993 {
6994 NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
6995 } else
6996 {
6997 NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
6998 }
6999 mtk_nand_set_autoformat(true);
7000
7001 ECC_Encode_Start();
7002
7003 if (!mtk_nand_set_command(NAND_CMD_SEQIN))
7004 {
7005 bRet = OTP_ERROR_BUSY;
7006 goto cleanup;
7007 }
7008
7009 if (!mtk_nand_set_address(coladdr, rowaddr, 2, 3))
7010 {
7011 bRet = OTP_ERROR_BUSY;
7012 goto cleanup;
7013 }
7014
7015 if (!mtk_nand_status_ready(STA_NAND_BUSY))
7016 {
7017 bRet = OTP_ERROR_BUSY;
7018 goto cleanup;
7019 }
7020
7021 mtk_nand_write_fdm_data((struct nand_chip *)mtd->priv, BufferPtr, sec_num);
7022 (void)mtk_nand_write_page_data(mtd, BufferPtr, u4Size);
7023 if (!mtk_nand_check_RW_count(u4Size))
7024 {
7025 MSG(OTP, "[%s]: Check RW count timeout !\n", __func__);
7026 bRet = OTP_ERROR_TIMEOUT;
7027 goto cleanup;
7028 }
7029
7030 mtk_nand_stop_write();
7031 (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
7032 while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ;
7033
7034 bRet = OTP_SUCCESS;
7035
 7036 MSG(OTP, "[%s]: Finished writing data to OTP area\n", __func__);
7037
7038 cleanup:
7039 mtk_nand_reset();
 7040 (void)mtk_nand_set_command(NAND_CMD_RESET);
7041 nand_release_device(mtd);
7042 return bRet;
7043}
7044
7045static int mt_otp_open(struct inode *inode, struct file *filp)
7046{
7047 MSG(OTP, "[%s]:(MAJOR)%d:(MINOR)%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
7048 filp->private_data = (int *)OTP_MAGIC_NUM;
7049 return 0;
7050}
7051
7052static int mt_otp_release(struct inode *inode, struct file *filp)
7053{
7054 MSG(OTP, "[%s]:(MAJOR)%d:(MINOR)%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
7055 return 0;
7056}
7057
7058static int mt_otp_access(unsigned int access_type, unsigned int offset, void *buff_ptr, unsigned int length, unsigned int *status)
7059{
7060 unsigned int i = 0, ret = 0;
7061 char *BufAddr = (char *)buff_ptr;
7062 unsigned int PageAddr, AccessLength = 0;
7063 int Status = 0;
7064
7065 static char *p_D_Buff = NULL;
7066 char S_Buff[64];
7067
7068 if (!(p_D_Buff = kmalloc(g_page_size, GFP_KERNEL)))
7069 {
7070 ret = -ENOMEM;
7071 *status = OTP_ERROR_NOMEM;
7072 goto exit;
7073 }
7074
7075 MSG(OTP, "[%s]: %s (0x%x) length:(%d bytes) !\n", __func__, access_type ? "WRITE" : "READ", offset, length);
7076
7077 while (1)
7078 {
7079 PageAddr = offset / g_page_size;
7080 if (FS_OTP_READ == access_type)
7081 {
7082 memset(p_D_Buff, 0xff, g_page_size);
7083 memset(S_Buff, 0xff, (sizeof(char) * 64));
7084
7085 MSG(OTP, "[%s]: Read Access of page (%d)\n", __func__, PageAddr);
7086
7087 Status = g_mtk_otp_fuc.OTPRead(PageAddr, p_D_Buff, &S_Buff);
7088 *status = Status;
7089
7090 if (OTP_SUCCESS != Status)
7091 {
7092 MSG(OTP, "[%s]: Read status (%d)\n", __func__, Status);
7093 break;
7094 }
7095
7096 AccessLength = g_page_size - (offset % g_page_size);
7097
7098 if (length >= AccessLength)
7099 {
7100 memcpy(BufAddr, (p_D_Buff + (offset % g_page_size)), AccessLength);
7101 } else
7102 {
7103 //last time
7104 memcpy(BufAddr, (p_D_Buff + (offset % g_page_size)), length);
7105 }
7106 } else if (FS_OTP_WRITE == access_type)
7107 {
7108 AccessLength = g_page_size - (offset % g_page_size);
7109 memset(p_D_Buff, 0xff, g_page_size);
7110 memset(S_Buff, 0xff, (sizeof(char) * 64));
7111
7112 if (length >= AccessLength)
7113 {
7114 memcpy((p_D_Buff + (offset % g_page_size)), BufAddr, AccessLength);
7115 } else
7116 {
7117 //last time
7118 memcpy((p_D_Buff + (offset % g_page_size)), BufAddr, length);
7119 }
7120
7121 Status = g_mtk_otp_fuc.OTPWrite(PageAddr, p_D_Buff, &S_Buff);
7122 *status = Status;
7123
7124 if (OTP_SUCCESS != Status)
7125 {
7126 MSG(OTP, "[%s]: Write status (%d)\n", __func__, Status);
7127 break;
7128 }
7129 } else
7130 {
 7131 MSG(OTP, "[%s]: Error, neither a read nor a write operation!\n", __func__);
7132 break;
7133 }
7134
7135 offset += AccessLength;
7136 BufAddr += AccessLength;
7137 if (length <= AccessLength)
7138 {
7139 length = 0;
7140 break;
7141 } else
7142 {
7143 length -= AccessLength;
7144 MSG(OTP, "[%s]: Remaining %s (%d) !\n", __func__, access_type ? "WRITE" : "READ", length);
7145 }
7146 }
7147 error:
7148 kfree(p_D_Buff);
7149 exit:
7150 return ret;
7151}
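/*
 * mt_otp_access() walks the requested byte range one OTP page at a time:
 * reads copy the requested slice out of a page buffer starting at
 * offset % g_page_size, while writes stage the user data into a 0xFF-filled
 * page buffer before programming, so bytes outside the requested range are
 * written as all-ones and leave the corresponding OTP cells unprogrammed.
 * The loop stops once the remaining length fits inside the current page.
 */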
7152
7153static long mt_otp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7154{
7155 int ret = 0, i = 0;
7156 static char *pbuf = NULL;
7157
7158 void __user *uarg = (void __user *)arg;
7159 struct otp_ctl otpctl;
7160
7161 /* Lock */
7162 spin_lock(&g_OTPLock);
7163
7164 if (copy_from_user(&otpctl, uarg, sizeof(struct otp_ctl)))
7165 {
7166 ret = -EFAULT;
7167 goto exit;
7168 }
7169
7170 if (false == g_bInitDone)
7171 {
7172 MSG(OTP, "ERROR: NAND Flash Not initialized !!\n");
7173 ret = -EFAULT;
7174 goto exit;
7175 }
7176
7177 if (!(pbuf = kmalloc(sizeof(char) * otpctl.Length, GFP_KERNEL)))
7178 {
7179 ret = -ENOMEM;
7180 goto exit;
7181 }
7182
7183 switch (cmd)
7184 {
7185 case OTP_GET_LENGTH:
7186 MSG(OTP, "OTP IOCTL: OTP_GET_LENGTH\n");
7187 g_mtk_otp_fuc.OTPQueryLength(&otpctl.QLength);
7188 otpctl.status = OTP_SUCCESS;
7189 MSG(OTP, "OTP IOCTL: The Length is %d\n", otpctl.QLength);
7190 break;
7191 case OTP_READ:
7192 MSG(OTP, "OTP IOCTL: OTP_READ Offset(0x%x), Length(0x%x) \n", otpctl.Offset, otpctl.Length);
7193 memset(pbuf, 0xff, sizeof(char) * otpctl.Length);
7194
7195 mt_otp_access(FS_OTP_READ, otpctl.Offset, pbuf, otpctl.Length, &otpctl.status);
7196
7197 if (copy_to_user(otpctl.BufferPtr, pbuf, (sizeof(char) * otpctl.Length)))
7198 {
 7199 MSG(OTP, "OTP IOCTL: Copy to user buffer Error !\n");
 7200 ret = -EFAULT; goto error;
7201 }
7202 break;
7203 case OTP_WRITE:
7204 MSG(OTP, "OTP IOCTL: OTP_WRITE Offset(0x%x), Length(0x%x) \n", otpctl.Offset, otpctl.Length);
7205 if (copy_from_user(pbuf, otpctl.BufferPtr, (sizeof(char) * otpctl.Length)))
7206 {
 7207 MSG(OTP, "OTP IOCTL: Copy from user buffer Error !\n");
 7208 ret = -EFAULT; goto error;
7209 }
7210 mt_otp_access(FS_OTP_WRITE, otpctl.Offset, pbuf, otpctl.Length, &otpctl.status);
7211 break;
7212 default:
7213 ret = -EINVAL;
7214 }
7215
7216 ret = copy_to_user(uarg, &otpctl, sizeof(struct otp_ctl));
7217
7218 error:
7219 kfree(pbuf);
7220 exit:
7221 spin_unlock(&g_OTPLock);
7222 return ret;
7223}
7224
7225static struct file_operations nand_otp_fops = {
7226 .owner = THIS_MODULE,
7227 .unlocked_ioctl = mt_otp_ioctl,
7228 .open = mt_otp_open,
7229 .release = mt_otp_release,
7230};
7231
7232static struct miscdevice nand_otp_dev = {
7233 .minor = MISC_DYNAMIC_MINOR,
7234 .name = "otp",
7235 .fops = &nand_otp_fops,
7236};
7237#endif
7238
7239/******************************************************************************
7240Device driver structure
7241******************************************************************************/
7242static struct platform_driver mtk_nand_driver = {
7243 .probe = mtk_nand_probe,
7244 .remove = mtk_nand_remove,
7245 .suspend = mtk_nand_suspend,
7246 .resume = mtk_nand_resume,
7247 .driver = {
7248 .name = "mtk-nand",
7249 .owner = THIS_MODULE,
7250 },
7251};
7252
7253/******************************************************************************
7254 * mtk_nand_init
7255 *
7256 * DESCRIPTION:
7257 * Init the device driver !
7258 *
7259 * PARAMETERS:
7260 * None
7261 *
7262 * RETURNS:
7263 * None
7264 *
7265 * NOTES:
7266 * None
7267 *
7268 ******************************************************************************/
7269 #define SEQ_printf(m, x...) \
7270 do { \
7271 if (m) \
7272 seq_printf(m, x); \
7273 else \
7274 printk(x); \
7275 } while (0)
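/*
 * SEQ_printf() writes through the seq_file when one is supplied (the normal
 * /proc read path via single_open()) and falls back to printk otherwise,
 * which allows the same show routine to be reused when no seq_file is
 * available.
 */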
7276
7277int mtk_nand_proc_show(struct seq_file *m, void *v)
7278{
7279 int i;
7280 SEQ_printf(m, "ID:");
7281 for(i=0;i<devinfo.id_length;i++){
7282 SEQ_printf(m, " 0x%x", devinfo.id[i]);
7283 }
7284 SEQ_printf(m, "\n");
7285 SEQ_printf(m, "total size: %dMiB; part number: %s\n", devinfo.totalsize,devinfo.devciename);
7286 SEQ_printf(m, "Current working in %s mode\n", g_i4Interrupt ? "interrupt" : "polling");
 7287 SEQ_printf(m, "NFI_ACCCON(0x%x)=0x%x\n",(NFI_BASE+0x000C),DRV_Reg32(NFI_ACCCON_REG32));
7288 SEQ_printf(m, "NFI_NAND_TYPE_CNFG_REG32= 0x%x\n",DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32));
7289#if CFG_FPGA_PLATFORM
7290 SEQ_printf(m, "[FPGA Dummy]DRV_CFG_NFIA(0x0)=0x0\n");
7291 SEQ_printf(m, "[FPGA Dummy]DRV_CFG_NFIB(0x0)=0x0\n");
7292#else
7293 SEQ_printf(m, "DRV_CFG_NFIA(IO PAD:0x%x)=0x%x\n",(GPIO_BASE+0xC20),*((volatile u32 *)(GPIO_BASE+0xC20)));
7294 SEQ_printf(m, "DRV_CFG_NFIB(CTRL PAD:0x%x)=0x%x\n",(GPIO_BASE+0xB50),*((volatile u32 *)(GPIO_BASE+0xB50)));
7295#endif
7296#if CFG_PERFLOG_DEBUG
7297 SEQ_printf(m, "Read Page Count:%d, Read Page totalTime:%lu, Avg. RPage:%lu\r\n",
7298 g_NandPerfLog.ReadPageCount,g_NandPerfLog.ReadPageTotalTime,
7299 g_NandPerfLog.ReadPageCount ? (g_NandPerfLog.ReadPageTotalTime/g_NandPerfLog.ReadPageCount): 0);
7300
7301 SEQ_printf(m, "Read subPage Count:%d, Read subPage totalTime:%lu, Avg. RPage:%lu\r\n",
7302 g_NandPerfLog.ReadSubPageCount,g_NandPerfLog.ReadSubPageTotalTime,
7303 g_NandPerfLog.ReadSubPageCount? (g_NandPerfLog.ReadSubPageTotalTime/g_NandPerfLog.ReadSubPageCount): 0);
7304
7305 SEQ_printf(m, "Read Busy Count:%d, Read Busy totalTime:%lu, Avg. R Busy:%lu\r\n",
7306 g_NandPerfLog.ReadBusyCount,g_NandPerfLog.ReadBusyTotalTime,
7307 g_NandPerfLog.ReadBusyCount? (g_NandPerfLog.ReadBusyTotalTime/g_NandPerfLog.ReadBusyCount): 0);
7308
7309 SEQ_printf(m, "Read DMA Count:%d, Read DMA totalTime:%lu, Avg. R DMA:%lu\r\n",
7310 g_NandPerfLog.ReadDMACount,g_NandPerfLog.ReadDMATotalTime,
7311 g_NandPerfLog.ReadDMACount? (g_NandPerfLog.ReadDMATotalTime/g_NandPerfLog.ReadDMACount): 0);
7312
7313 SEQ_printf(m, "Write Page Count:%d, Write Page totalTime:%lu, Avg. WPage:%lu\r\n",
7314 g_NandPerfLog.WritePageCount,g_NandPerfLog.WritePageTotalTime,
7315 g_NandPerfLog.WritePageCount? (g_NandPerfLog.WritePageTotalTime/g_NandPerfLog.WritePageCount): 0);
7316
7317 SEQ_printf(m, "Write Busy Count:%d, Write Busy totalTime:%lu, Avg. W Busy:%lu\r\n",
7318 g_NandPerfLog.WriteBusyCount,g_NandPerfLog.WriteBusyTotalTime,
7319 g_NandPerfLog.WriteBusyCount? (g_NandPerfLog.WriteBusyTotalTime/g_NandPerfLog.WriteBusyCount): 0);
7320
7321 SEQ_printf(m, "Write DMA Count:%d, Write DMA totalTime:%lu, Avg. W DMA:%lu\r\n",
7322 g_NandPerfLog.WriteDMACount,g_NandPerfLog.WriteDMATotalTime,
7323 g_NandPerfLog.WriteDMACount? (g_NandPerfLog.WriteDMATotalTime/g_NandPerfLog.WriteDMACount): 0);
7324
7325 SEQ_printf(m, "EraseBlock Count:%d, EraseBlock totalTime:%lu, Avg. Erase:%lu\r\n",
7326 g_NandPerfLog.EraseBlockCount,g_NandPerfLog.EraseBlockTotalTime,
7327 g_NandPerfLog.EraseBlockCount? (g_NandPerfLog.EraseBlockTotalTime/g_NandPerfLog.EraseBlockCount): 0);
7328
7329#endif
7330 return 0;
7331}
7332
7333
7334static int mt_nand_proc_open(struct inode *inode, struct file *file)
7335{
7336 return single_open(file, mtk_nand_proc_show, inode->i_private);
7337}
7338
7339
7340static const struct file_operations mtk_nand_fops = {
7341 .open = mt_nand_proc_open,
7342 .write = mtk_nand_proc_write,
7343 .read = seq_read,
7344 .llseek = seq_lseek,
7345 .release = single_release,
7346};
7347static int __init mtk_nand_init(void)
7348{
7349 struct proc_dir_entry *entry;
7350 g_i4Interrupt = 0;
7351
7352#if defined(NAND_OTP_SUPPORT)
7353 int err = 0;
7354 MSG(OTP, "OTP: register NAND OTP device ...\n");
7355 err = misc_register(&nand_otp_dev);
7356 if (unlikely(err))
7357 {
7358 MSG(OTP, "OTP: failed to register NAND OTP device!\n");
7359 return err;
7360 }
7361 spin_lock_init(&g_OTPLock);
7362#endif
7363
7364#if (defined(NAND_OTP_SUPPORT) && SAMSUNG_OTP_SUPPORT)
7365 g_mtk_otp_fuc.OTPQueryLength = samsung_OTPQueryLength;
7366 g_mtk_otp_fuc.OTPRead = samsung_OTPRead;
7367 g_mtk_otp_fuc.OTPWrite = samsung_OTPWrite;
7368#endif
7369
7370 entry = proc_create(PROCNAME, 0664, NULL, &mtk_nand_fops);
 7371 #if 0 /* removed in kernel 3.10 */
7372 entry = create_proc_entry(PROCNAME, 0664, NULL);
7373 if (entry == NULL)
7374 {
7375 MSG(INIT, "MTK Nand : unable to create /proc entry\n");
7376 return -ENOMEM;
7377 }
7378 entry->read_proc = mtk_nand_proc_read;
7379 entry->write_proc = mtk_nand_proc_write;
7380 #endif
7381
7382 //printk("MediaTek Nand driver init, version %s\n", VERSION);
7383
7384 return platform_driver_register(&mtk_nand_driver);
7385}
7386
7387/******************************************************************************
7388 * mtk_nand_exit
7389 *
7390 * DESCRIPTION:
7391 * Free the device driver !
7392 *
7393 * PARAMETERS:
7394 * None
7395 *
7396 * RETURNS:
7397 * None
7398 *
7399 * NOTES:
7400 * None
7401 *
7402 ******************************************************************************/
7403static void __exit mtk_nand_exit(void)
7404{
7405 MSG(INIT, "MediaTek Nand driver exit, version %s\n", VERSION);
7406#if defined(NAND_OTP_SUPPORT)
7407 misc_deregister(&nand_otp_dev);
7408#endif
7409
7410#ifdef SAMSUNG_OTP_SUPPORT
7411 g_mtk_otp_fuc.OTPQueryLength = NULL;
7412 g_mtk_otp_fuc.OTPRead = NULL;
7413 g_mtk_otp_fuc.OTPWrite = NULL;
7414#endif
7415
7416 platform_driver_unregister(&mtk_nand_driver);
7417 remove_proc_entry(PROCNAME, NULL);
7418}
7419
7420module_init(mtk_nand_init);
7421module_exit(mtk_nand_exit);
7422MODULE_LICENSE("GPL");