1 /******************************************************************************
2 * mtk_nand.c - MTK NAND Flash Device Driver
4 * Copyright 2009-2012 MediaTek Co.,Ltd.
7 * This file provides NAND-related functions for other drivers
10 * ----------------------------------------
11 * v3.0, 11 Feb 2010, mtk
12 * ----------------------------------------
13 ******************************************************************************/
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/delay.h>
19 #include <linux/errno.h>
20 #include <linux/sched.h>
21 #include <linux/types.h>
22 #include <linux/wait.h>
23 #include <linux/spinlock.h>
24 #include <linux/interrupt.h>
25 #include <linux/mtd/mtd.h>
26 #include <linux/mtd/nand.h>
27 #include <linux/mtd/partitions.h>
28 #include <linux/mtd/nand_ecc.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/jiffies.h>
31 #include <linux/platform_device.h>
32 #include <linux/proc_fs.h>
33 #include <linux/seq_file.h>
34 #include <linux/time.h>
36 #include <linux/xlog.h>
38 #include <asm/cacheflush.h>
39 #include <asm/uaccess.h>
40 #include <linux/miscdevice.h>
41 #include <mach/mtk_nand.h>
43 #include <mach/devs.h>
44 #include <mach/mt_reg_base.h>
45 #include <mach/mt_typedefs.h>
46 #include <mach/mt_clkmgr.h>
47 #include <mach/mtk_nand.h>
49 #include <mach/mt_irq.h>
50 //#include "partition.h"
51 #include <asm/system.h>
52 #include "partition_define.h"
53 #include <mach/mt_boot.h>
54 //#include "../../../../../../source/kernel/drivers/aee/ipanic/ipanic.h"
55 #include <linux/rtc.h>
56 #include <mach/mt_gpio.h>
57 #include <mach/mt_pm_ldo.h>
58 #ifdef CONFIG_PWR_LOSS_MTK_SPOH
59 #include <mach/power_loss_test.h>
61 #include <mach/nand_device_define.h>
63 #define VERSION "v2.1 Fix AHB virt2phys error"
64 #define MODULE_NAME "# MTK NAND #"
65 #define PROCNAME "driver/nand"
67 #define _MTK_NAND_DUMMY_DRIVER_
68 #define __INTERNAL_USE_AHB_MODE__ (1)
69 #define CFG_FPGA_PLATFORM (0) // for fpga by bean
70 #define CFG_RANDOMIZER (1) // for randomizer code
71 #define CFG_PERFLOG_DEBUG (0) // for performance log
72 #define CFG_2CS_NAND (1) // for 2CS nand
73 #define CFG_COMBO_NAND (1) // for Combo nand
75 #define NFI_TRICKY_CS (1) // must be 1 or > 1?
77 #define PERI_NFI_CLK_SOURCE_SEL ((volatile P_U32)(PERICFG_BASE+0x424))
78 #define PERI_NFI_MAC_CTRL ((volatile P_U32)(PERICFG_BASE+0x428))
79 #define NFI_PAD_1X_CLOCK (1) //nfi1X
81 void show_stack(struct task_struct
*tsk
, unsigned long *sp
);
82 extern void mt_irq_set_sens(unsigned int irq
, unsigned int sens
);
83 extern void mt_irq_set_polarity(unsigned int irq
,unsigned int polarity
);
85 extern struct mtd_partition g_pasStatic_Partition
[PART_MAX_COUNT
];
87 #if defined(MTK_MLC_NAND_SUPPORT)
88 bool MLC_DEVICE
= TRUE
;// to build pass xiaolei
91 #if defined(NAND_OTP_SUPPORT)
93 #define SAMSUNG_OTP_SUPPORT 1
94 #define OTP_MAGIC_NUM 0x4E3AF28B
95 #define SAMSUNG_OTP_PAGE_NUM 6
97 static const unsigned int Samsung_OTP_Page
[SAMSUNG_OTP_PAGE_NUM
] = { 0x15, 0x16, 0x17, 0x18, 0x19, 0x1b };
99 static struct mtk_otp_config g_mtk_otp_fuc
;
100 static spinlock_t g_OTPLock
;
102 #define OTP_MAGIC 'k'
104 /* NAND OTP IO control number */
105 #define OTP_GET_LENGTH _IOW(OTP_MAGIC, 1, int)
106 #define OTP_READ _IOW(OTP_MAGIC, 2, int)
107 #define OTP_WRITE _IOW(OTP_MAGIC, 3, int)
109 #define FS_OTP_READ 0
110 #define FS_OTP_WRITE 1
112 /* NAND OTP Error codes */
113 #define OTP_SUCCESS 0
114 #define OTP_ERROR_OVERSCOPE -1
115 #define OTP_ERROR_TIMEOUT -2
116 #define OTP_ERROR_BUSY -3
117 #define OTP_ERROR_NOMEM -4
118 #define OTP_ERROR_RESET -5
120 struct mtk_otp_config
122 u32(*OTPRead
) (u32 PageAddr
, void *BufferPtr
, void *SparePtr
);
123 u32(*OTPWrite
) (u32 PageAddr
, void *BufferPtr
, void *SparePtr
);
124 u32(*OTPQueryLength
) (u32
* Length
);
129 unsigned int QLength
;
137 #define ERR_RTN_SUCCESS 1
138 #define ERR_RTN_FAIL 0
139 #define ERR_RTN_BCH_FAIL -1
141 #define NFI_SET_REG32(reg, value) \
143 g_value = (DRV_Reg32(reg) | (value));\
144 DRV_WriteReg32(reg, g_value); \
147 #define NFI_SET_REG16(reg, value) \
149 g_value = (DRV_Reg16(reg) | (value));\
150 DRV_WriteReg16(reg, g_value); \
153 #define NFI_CLN_REG32(reg, value) \
155 g_value = (DRV_Reg32(reg) & (~(value)));\
156 DRV_WriteReg32(reg, g_value); \
159 #define NFI_CLN_REG16(reg, value) \
161 g_value = (DRV_Reg16(reg) & (~(value)));\
162 DRV_WriteReg16(reg, g_value); \
165 #define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state)
166 #define NFI_WAIT_TO_READY() do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY))
167 #define FIFO_PIO_READY(x) (0x1 & x)
168 #define WAIT_NFI_PIO_READY(timeout) \
170 while( (!FIFO_PIO_READY(DRV_Reg(NFI_PIO_DIRDY_REG16))) && (--timeout) );\
174 #define NAND_SECTOR_SIZE (512)
175 #define OOB_PER_SECTOR (16)
176 #define OOB_AVAI_PER_SECTOR (8)
178 #if defined(MTK_COMBO_NAND_SUPPORT)
179 // BMT_POOL_SIZE is not used anymore
181 #ifndef PART_SIZE_BMTPOOL
182 #define BMT_POOL_SIZE (80)
184 #define BMT_POOL_SIZE (PART_SIZE_BMTPOOL)
188 #define PMT_POOL_SIZE (2)
189 /*******************************************************************************
190 * Global Variable Definition
191 *******************************************************************************/
192 #if CFG_PERFLOG_DEBUG
195 unsigned int ReadPageCount
;
196 suseconds_t ReadPageTotalTime
;
197 unsigned int ReadBusyCount
;
198 suseconds_t ReadBusyTotalTime
;
199 unsigned int ReadDMACount
;
200 suseconds_t ReadDMATotalTime
;
202 unsigned int ReadSubPageCount
;
203 suseconds_t ReadSubPageTotalTime
;
205 unsigned int WritePageCount
;
206 suseconds_t WritePageTotalTime
;
207 unsigned int WriteBusyCount
;
208 suseconds_t WriteBusyTotalTime
;
209 unsigned int WriteDMACount
;
210 suseconds_t WriteDMATotalTime
;
212 unsigned int EraseBlockCount
;
213 suseconds_t EraseBlockTotalTime
;
219 #define PL_TIME_RAND_PROG(chip, page_addr, time) do { \
220 if(host->pl.nand_program_wdt_enable == 1){ \
221 PL_TIME_RAND(page_addr, time, host->pl.last_prog_time);} \
226 #define PL_TIME_RAND_ERASE(chip, page_addr, time) do { \
227 if(host->pl.nand_erase_wdt_enable == 1){ \
228 PL_TIME_RAND(page_addr, time, host->pl.last_erase_time); \
230 printk(KERN_ERR "[MVG_TEST]: Erase reset in %d us\n", time);} \
235 #define PL_TIME_PROG(duration) do { \
236 host->pl.last_prog_time = duration; \
239 #define PL_TIME_ERASE(duration) do { \
240 host->pl.last_erase_time = duration; \
244 #define PL_TIME_PROG_WDT_SET(WDT) do { \
245 host->pl.nand_program_wdt_enable = WDT; \
248 #define PL_TIME_ERASE_WDT_SET(WDT) do { \
249 host->pl.nand_erase_wdt_enable = WDT; \
252 #define PL_NAND_BEGIN(time) PL_BEGIN(time)
254 #define PL_NAND_RESET(time) PL_RESET(time)
256 #define PL_NAND_END(pl_time_write, duration) PL_END(pl_time_write, duration)
261 #define PL_TIME_RAND_PROG(chip, page_addr, time)
262 #define PL_TIME_RAND_ERASE(chip, page_addr, time)
264 #define PL_TIME_PROG(duration)
265 #define PL_TIME_ERASE(duration)
267 #define PL_TIME_PROG_WDT_SET(WDT)
268 #define PL_TIME_ERASE_WDT_SET(WDT)
270 #define PL_NAND_BEGIN(time)
271 #define PL_NAND_RESET(time)
272 #define PL_NAND_END(pl_time_write, duration)
276 #if CFG_PERFLOG_DEBUG
277 static struct nand_perf_log g_NandPerfLog
={0};
278 static struct timeval g_NandLogTimer
={0};
282 static suseconds_t g_PFM_R
= 0;
283 static suseconds_t g_PFM_W
= 0;
284 static suseconds_t g_PFM_E
= 0;
285 static u32 g_PFM_RNum
= 0;
286 static u32 g_PFM_RD
= 0;
287 static u32 g_PFM_WD
= 0;
288 static struct timeval g_now
;
290 #define PFM_BEGIN(time) \
291 do_gettimeofday(&g_now); \
294 #define PFM_END_R(time, n) \
295 do_gettimeofday(&g_now); \
296 g_PFM_R += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
299 MSG(PERFORMANCE, "%s - Read PFM: %lu, data: %d, ReadOOB: %d (%d, %d)\n", MODULE_NAME , g_PFM_R, g_PFM_RD, g_kCMD.pureReadOOB, g_kCMD.pureReadOOBNum, g_PFM_RNum);
301 #define PFM_END_W(time, n) \
302 do_gettimeofday(&g_now); \
303 g_PFM_W += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
305 MSG(PERFORMANCE, "%s - Write PFM: %lu, data: %d\n", MODULE_NAME, g_PFM_W, g_PFM_WD);
307 #define PFM_END_E(time) \
308 do_gettimeofday(&g_now); \
309 g_PFM_E += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
310 MSG(PERFORMANCE, "%s - Erase PFM: %lu\n", MODULE_NAME, g_PFM_E);
312 #define PFM_BEGIN(time)
313 #define PFM_END_R(time, n)
314 #define PFM_END_W(time, n)
315 #define PFM_END_E(time)
318 #define TIMEOUT_1 0x1fff
319 #define TIMEOUT_2 0x8ff
320 #define TIMEOUT_3 0xffff
321 #define TIMEOUT_4 0xffff //5000 //PIO
323 #define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \
325 DRV_WriteReg(NFI_CMD_REG16,cmd);\
326 while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\
327 DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\
328 DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\
329 DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<<ADDR_ROW_NOB_SHIFT));\
330 while (DRV_Reg32(NFI_STA_REG32) & STA_ADDR_STATE);\
333 //-------------------------------------------------------------------------------
334 static struct completion g_comp_AHB_Done
;
335 static struct NAND_CMD g_kCMD
;
337 static int g_i4Interrupt
;
338 static bool g_bcmdstatus
;
339 //static bool g_brandstatus;
340 static u32 g_value
= 0;
341 static int g_page_size
;
342 static int g_block_size
;
343 static u32 PAGES_PER_BLOCK
= 255;
344 static bool g_bSyncOrToggle
= false;
345 static int g_iNFI2X_CLKSRC
= ARMPLL
;
346 //extern unsigned int flash_number;
347 //extern flashdev_info_t gen_FlashTable_p[MAX_FLASH];
351 bool g_b2Die_CS
= FALSE
; // for nand base
352 static bool g_bTricky_CS
= FALSE
;
353 static u32 g_nanddie_pages
= 0;
356 #if __INTERNAL_USE_AHB_MODE__
357 BOOL g_bHwEcc
= true;
359 BOOL g_bHwEcc
= false;
364 static u8
*local_buffer_16_align
; // 16 byte aligned buffer, for HW issue
365 __attribute__((aligned(64))) static u8 local_buffer
[LPAGE
+ LSPARE
];
366 static u8
*temp_buffer_16_align
; // 16 byte aligned buffer, for HW issue
367 __attribute__((aligned(64))) static u8 temp_buffer
[LPAGE
+ LSPARE
];
368 //static u8 *bean_buffer_16_align; // 16 byte aligned buffer, for HW issue
369 //__attribute__((aligned(64))) static u8 bean_buffer[LPAGE + LSPARE];
372 extern struct mtd_perf_log g_MtdPerfLog
;
374 extern void nand_release_device(struct mtd_info
*mtd
);
375 extern int nand_get_device(struct mtd_info
*mtd
, int new_state
);
376 bool mtk_nand_SetFeature(struct mtd_info
*mtd
, u16 cmd
, u32 addr
, u8
*value
, u8 bytes
);
377 bool mtk_nand_GetFeature(struct mtd_info
*mtd
, u16 cmd
, u32 addr
, u8
*value
, u8 bytes
);
380 static int mtk_nand_cs_check(struct mtd_info
*mtd
, u8
*id
, u16 cs
);
381 static u32
mtk_nand_cs_on(struct nand_chip
*nand_chip
, u16 cs
, u32 page
);
385 static bmt_struct
*g_bmt
;
386 struct mtk_nand_host
*host
;
387 static u8 g_running_dma
= 0;
388 #ifdef DUMP_NATIVE_BACKTRACE
389 static u32 g_dump_count
= 0;
391 //extern struct mtd_partition g_pasStatic_Partition[];//to build pass xiaolei
392 //int part_num = PART_NUM;//to build pass xiaolei NUM_PARTITIONS;
394 extern void part_init_pmt(struct mtd_info
*mtd
, u8
* buf
);
395 extern struct mtd_partition g_exist_Partition
[];
400 static u8 local_oob_buf
[LSPARE
];
402 #ifdef _MTK_NAND_DUMMY_DRIVER_
403 int dummy_driver_debug
;
406 flashdev_info_t devinfo
;
413 TYPE_MLC
= 0x4, // 1b0
414 TYPE_SLC
= 0x4, // 1b1
417 u32
MICRON_TRANSFER(u32 pageNo
);
418 u32
SANDISK_TRANSFER(u32 pageNo
);
419 u32
HYNIX_TRANSFER(u32 pageNo
);
420 u32
hynix_pairpage_mapping(u32 page
, bool high_to_low
);
421 u32
micron_pairpage_mapping(u32 page
, bool high_to_low
);
422 u32
sandisk_pairpage_mapping(u32 page
, bool high_to_low
);
424 typedef u32 (*GetLowPageNumber
)(u32 pageNo
);
425 typedef u32 (*TransferPageNumber
)(u32 pageNo
, bool high_to_low
);
427 GetLowPageNumber functArray
[]=
434 TransferPageNumber fsFuncArray
[]=
436 micron_pairpage_mapping
,
437 hynix_pairpage_mapping
,
438 sandisk_pairpage_mapping
,
441 u32
SANDISK_TRANSFER(u32 pageNo
)
449 return pageNo
+pageNo
-1;
453 u32
HYNIX_TRANSFER(u32 pageNo
)
458 temp
= pageNo
+(pageNo
&0xFFFFFFFE)-2;
463 u32
MICRON_TRANSFER(u32 pageNo
)
468 temp
= (pageNo
- 4) & 0xFFFFFFFE;
470 return (pageNo
+temp
);
472 return (pageNo
+temp
-2);
475 u32
sandisk_pairpage_mapping(u32 page
, bool high_to_low
)
477 if(TRUE
== high_to_low
)
481 if((page
== 0) || (1 == (page
%2)))
493 if((page
!= 0) && (0 == (page
%2)))
499 if(page
== 0 || page
== 253)
507 u32
hynix_pairpage_mapping(u32 page
, bool high_to_low
)
510 if(TRUE
== high_to_low
)
519 if(offset
==2 || offset
==3)
525 if(page
== 4 || page
== 5 || page
== 254 || page
== 255)
537 if(page
== 0 || page
== 1)
540 if(offset
==0 || offset
==1)
551 u32
micron_pairpage_mapping(u32 page
, bool high_to_low
)
554 if(TRUE
== high_to_low
)
557 if((page
<4)||(page
>251))
563 if(offset
==0 || offset
==1)
574 if((page
== 2) || (page
== 3) ||(page
>247))
579 if(offset
==0 || offset
==1)
590 int mtk_nand_paired_page_transfer(u32 pageNo
, bool high_to_low
)
592 if(devinfo
.vendor
!= VEND_NONE
)
594 return fsFuncArray
[devinfo
.feature_set
.ptbl_idx
](pageNo
,high_to_low
);
602 #if 0//#if CFG_FPGA_PLATFORM
603 void nand_enable_clock(void)
608 void nand_disable_clock(void)
615 void nand_enable_clock(void)
617 if(clock_is_on(MT_CG_PERI_NFI
)==PWR_DOWN
)
618 enable_clock(MT_CG_PERI_NFI
, "NFI");
619 if(clock_is_on(MT_CG_PERI_NFI_ECC
)==PWR_DOWN
)
620 enable_clock(MT_CG_PERI_NFI_ECC
, "NFI");
621 if(clock_is_on(MT_CG_PERI_NFIPAD
)==PWR_DOWN
)
622 enable_clock(MT_CG_PERI_NFIPAD
, "NFI");
625 void nand_disable_clock(void)
627 if(clock_is_on(MT_CG_PERI_NFIPAD
)==PWR_ON
)
628 disable_clock(MT_CG_PERI_NFIPAD
, "NFI");
629 if(clock_is_on(MT_CG_PERI_NFI_ECC
)==PWR_ON
)
630 disable_clock(MT_CG_PERI_NFI_ECC
, "NFI");
631 if(clock_is_on(MT_CG_PERI_NFI
)==PWR_ON
)
632 disable_clock(MT_CG_PERI_NFI
, "NFI");
636 static struct nand_ecclayout nand_oob_16
= {
638 .eccpos
= {8, 9, 10, 11, 12, 13, 14, 15},
639 .oobfree
= {{1, 6}, {0, 0}}
642 struct nand_ecclayout nand_oob_64
= {
644 .eccpos
= {32, 33, 34, 35, 36, 37, 38, 39,
645 40, 41, 42, 43, 44, 45, 46, 47,
646 48, 49, 50, 51, 52, 53, 54, 55,
647 56, 57, 58, 59, 60, 61, 62, 63},
648 .oobfree
= {{1, 7}, {9, 7}, {17, 7}, {25, 6}, {0, 0}}
651 struct nand_ecclayout nand_oob_128
= {
654 64, 65, 66, 67, 68, 69, 70, 71,
655 72, 73, 74, 75, 76, 77, 78, 79,
656 80, 81, 82, 83, 84, 85, 86, 86,
657 88, 89, 90, 91, 92, 93, 94, 95,
658 96, 97, 98, 99, 100, 101, 102, 103,
659 104, 105, 106, 107, 108, 109, 110, 111,
660 112, 113, 114, 115, 116, 117, 118, 119,
661 120, 121, 122, 123, 124, 125, 126, 127},
662 .oobfree
= {{1, 7}, {9, 7}, {17, 7}, {25, 7}, {33, 7}, {41, 7}, {49, 7}, {57, 6}}
665 /**************************************************************************
667 **************************************************************************/
668 #define SS_SEED_NUM 128
669 #define EFUSE_RANDOM_CFG ((volatile u32 *)(0xF02061c0))
670 #define EFUSE_RANDOM_ENABLE 0x00000004
671 static bool use_randomizer
= FALSE
;
672 static bool pre_randomizer
= FALSE
;
674 static U16 SS_RANDOM_SEED
[SS_SEED_NUM
] =
677 0x576A, 0x05E8, 0x629D, 0x45A3, 0x649C, 0x4BF0, 0x2342, 0x272E,
678 0x7358, 0x4FF3, 0x73EC, 0x5F70, 0x7A60, 0x1AD8, 0x3472, 0x3612,
679 0x224F, 0x0454, 0x030E, 0x70A5, 0x7809, 0x2521, 0x484F, 0x5A2D,
680 0x492A, 0x043D, 0x7F61, 0x3969, 0x517A, 0x3B42, 0x769D, 0x0647,
681 0x7E2A, 0x1383, 0x49D9, 0x07B8, 0x2578, 0x4EEC, 0x4423, 0x352F,
682 0x5B22, 0x72B9, 0x367B, 0x24B6, 0x7E8E, 0x2318, 0x6BD0, 0x5519,
683 0x1783, 0x18A7, 0x7B6E, 0x7602, 0x4B7F, 0x3648, 0x2C53, 0x6B99,
684 0x0C23, 0x67CF, 0x7E0E, 0x4D8C, 0x5079, 0x209D, 0x244A, 0x747B,
685 0x350B, 0x0E4D, 0x7004, 0x6AC3, 0x7F3E, 0x21F5, 0x7A15, 0x2379,
686 0x1517, 0x1ABA, 0x4E77, 0x15A1, 0x04FA, 0x2D61, 0x253A, 0x1302,
687 0x1F63, 0x5AB3, 0x049A, 0x5AE8, 0x1CD7, 0x4A00, 0x30C8, 0x3247,
688 0x729C, 0x5034, 0x2B0E, 0x57F2, 0x00E4, 0x575B, 0x6192, 0x38F8,
689 0x2F6A, 0x0C14, 0x45FC, 0x41DF, 0x38DA, 0x7AE1, 0x7322, 0x62DF,
690 0x5E39, 0x0E64, 0x6D85, 0x5951, 0x5937, 0x6281, 0x33A1, 0x6A32,
691 0x3A5A, 0x2BAC, 0x743A, 0x5E74, 0x3B2E, 0x7EC7, 0x4FD2, 0x5D28,
692 0x751F, 0x3EF8, 0x39B1, 0x4E49, 0x746B, 0x6EF6, 0x44BE, 0x6DB7
696 //#if CFG_PERFLOG_DEBUG
/*
 * Cal_timediff - return (*end_time - *start_time) in microseconds.
 *
 * @end_time:   later timestamp
 * @start_time: earlier timestamp
 *
 * Normalizes a negative tv_usec by borrowing from tv_sec, so the result
 * is correct even when end_time's microsecond field is smaller than
 * start_time's. The 1000000LL multiplier forces 64-bit arithmetic so the
 * intermediate product cannot overflow for large second differences.
 *
 * NOTE(review): the tail of the return expression was lost to an
 * extraction gap; it is completed here as "+ difference.tv_usec", which
 * is the only term consistent with the computation above.
 */
static suseconds_t Cal_timediff(struct timeval *end_time, struct timeval *start_time)
{
	struct timeval difference;

	difference.tv_sec = end_time->tv_sec - start_time->tv_sec;
	difference.tv_usec = end_time->tv_usec - start_time->tv_usec;

	/* Using while instead of if below makes the code slightly more robust. */
	while (difference.tv_usec < 0) {
		difference.tv_usec += 1000000;
		difference.tv_sec -= 1;
	}

	return 1000000LL * difference.tv_sec + difference.tv_usec;
} /* timeval_diff() */
716 #if CFG_PERFLOG_DEBUG
718 void dump_nand_rwcount(void)
720 struct timeval now_time
;
721 do_gettimeofday(&now_time
);
722 if(Cal_timediff(&now_time
,&g_NandLogTimer
)>(500*1000)) // Dump per 100ms
724 MSG(INIT
, " RPageCnt: %d (%lu us) RSubCnt: %d (%lu us) WPageCnt: %d (%lu us) ECnt: %d mtd(0/512/1K/2K/3K/4K): %d %d %d %d %d %d\n ",
725 g_NandPerfLog
.ReadPageCount
,
726 g_NandPerfLog
.ReadPageCount
? (g_NandPerfLog
.ReadPageTotalTime
/g_NandPerfLog
.ReadPageCount
): 0,
727 g_NandPerfLog
.ReadSubPageCount
,
728 g_NandPerfLog
.ReadSubPageCount
? (g_NandPerfLog
.ReadSubPageTotalTime
/g_NandPerfLog
.ReadSubPageCount
): 0,
729 g_NandPerfLog
.WritePageCount
,
730 g_NandPerfLog
.WritePageCount
? (g_NandPerfLog
.WritePageTotalTime
/g_NandPerfLog
.WritePageCount
): 0,
731 g_NandPerfLog
.EraseBlockCount
,
732 g_MtdPerfLog
.read_size_0_512
,
733 g_MtdPerfLog
.read_size_512_1K
,
734 g_MtdPerfLog
.read_size_1K_2K
,
735 g_MtdPerfLog
.read_size_2K_3K
,
736 g_MtdPerfLog
.read_size_3K_4K
,
737 g_MtdPerfLog
.read_size_Above_4K
740 memset(&g_NandPerfLog
,0x00,sizeof(g_NandPerfLog
));
741 memset(&g_MtdPerfLog
,0x00,sizeof(g_MtdPerfLog
));
742 do_gettimeofday(&g_NandLogTimer
);
750 printk("~~~~Dump NFI Register in Kernel~~~~\n");
751 printk("NFI_CNFG_REG16: 0x%x\n", DRV_Reg16(NFI_CNFG_REG16
));
752 printk("NFI_PAGEFMT_REG16: 0x%x\n", DRV_Reg16(NFI_PAGEFMT_REG16
));
753 printk("NFI_CON_REG16: 0x%x\n", DRV_Reg16(NFI_CON_REG16
));
754 printk("NFI_ACCCON_REG32: 0x%x\n", DRV_Reg32(NFI_ACCCON_REG32
));
755 printk("NFI_INTR_EN_REG16: 0x%x\n", DRV_Reg16(NFI_INTR_EN_REG16
));
756 printk("NFI_INTR_REG16: 0x%x\n", DRV_Reg16(NFI_INTR_REG16
));
757 printk("NFI_CMD_REG16: 0x%x\n", DRV_Reg16(NFI_CMD_REG16
));
758 printk("NFI_ADDRNOB_REG16: 0x%x\n", DRV_Reg16(NFI_ADDRNOB_REG16
));
759 printk("NFI_COLADDR_REG32: 0x%x\n", DRV_Reg32(NFI_COLADDR_REG32
));
760 printk("NFI_ROWADDR_REG32: 0x%x\n", DRV_Reg32(NFI_ROWADDR_REG32
));
761 printk("NFI_STRDATA_REG16: 0x%x\n", DRV_Reg16(NFI_STRDATA_REG16
));
762 printk("NFI_DATAW_REG32: 0x%x\n", DRV_Reg32(NFI_DATAW_REG32
));
763 printk("NFI_DATAR_REG32: 0x%x\n", DRV_Reg32(NFI_DATAR_REG32
));
764 printk("NFI_PIO_DIRDY_REG16: 0x%x\n", DRV_Reg16(NFI_PIO_DIRDY_REG16
));
765 printk("NFI_STA_REG32: 0x%x\n", DRV_Reg32(NFI_STA_REG32
));
766 printk("NFI_FIFOSTA_REG16: 0x%x\n", DRV_Reg16(NFI_FIFOSTA_REG16
));
767 // printk("NFI_LOCKSTA_REG16: 0x%x\n", DRV_Reg16(NFI_LOCKSTA_REG16));
768 printk("NFI_ADDRCNTR_REG16: 0x%x\n", DRV_Reg16(NFI_ADDRCNTR_REG16
));
769 printk("NFI_STRADDR_REG32: 0x%x\n", DRV_Reg32(NFI_STRADDR_REG32
));
770 printk("NFI_BYTELEN_REG16: 0x%x\n", DRV_Reg16(NFI_BYTELEN_REG16
));
771 printk("NFI_CSEL_REG16: 0x%x\n", DRV_Reg16(NFI_CSEL_REG16
));
772 printk("NFI_IOCON_REG16: 0x%x\n", DRV_Reg16(NFI_IOCON_REG16
));
773 printk("NFI_FDM0L_REG32: 0x%x\n", DRV_Reg32(NFI_FDM0L_REG32
));
774 printk("NFI_FDM0M_REG32: 0x%x\n", DRV_Reg32(NFI_FDM0M_REG32
));
775 printk("NFI_LOCK_REG16: 0x%x\n", DRV_Reg16(NFI_LOCK_REG16
));
776 printk("NFI_LOCKCON_REG32: 0x%x\n", DRV_Reg32(NFI_LOCKCON_REG32
));
777 printk("NFI_LOCKANOB_REG16: 0x%x\n", DRV_Reg16(NFI_LOCKANOB_REG16
));
778 printk("NFI_FIFODATA0_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA0_REG32
));
779 printk("NFI_FIFODATA1_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA1_REG32
));
780 printk("NFI_FIFODATA2_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA2_REG32
));
781 printk("NFI_FIFODATA3_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA3_REG32
));
782 printk("NFI_MASTERSTA_REG16: 0x%x\n", DRV_Reg16(NFI_MASTERSTA_REG16
));
783 printk("NFI_DEBUG_CON1_REG16: 0x%x\n", DRV_Reg16(NFI_DEBUG_CON1_REG16
));
784 printk("ECC_ENCCON_REG16 :%x\n",*ECC_ENCCON_REG16
);
785 printk("ECC_ENCCNFG_REG32 :%x\n",*ECC_ENCCNFG_REG32
);
786 printk("ECC_ENCDIADDR_REG32 :%x\n",*ECC_ENCDIADDR_REG32
);
787 printk("ECC_ENCIDLE_REG32 :%x\n",*ECC_ENCIDLE_REG32
);
788 printk("ECC_ENCPAR0_REG32 :%x\n",*ECC_ENCPAR0_REG32
);
789 printk("ECC_ENCPAR1_REG32 :%x\n",*ECC_ENCPAR1_REG32
);
790 printk("ECC_ENCPAR2_REG32 :%x\n",*ECC_ENCPAR2_REG32
);
791 printk("ECC_ENCPAR3_REG32 :%x\n",*ECC_ENCPAR3_REG32
);
792 printk("ECC_ENCPAR4_REG32 :%x\n",*ECC_ENCPAR4_REG32
);
793 printk("ECC_ENCPAR5_REG32 :%x\n",*ECC_ENCPAR5_REG32
);
794 printk("ECC_ENCPAR6_REG32 :%x\n",*ECC_ENCPAR6_REG32
);
795 printk("ECC_ENCSTA_REG32 :%x\n",*ECC_ENCSTA_REG32
);
796 printk("ECC_ENCIRQEN_REG16 :%x\n",*ECC_ENCIRQEN_REG16
);
797 printk("ECC_ENCIRQSTA_REG16 :%x\n",*ECC_ENCIRQSTA_REG16
);
798 printk("ECC_DECCON_REG16 :%x\n",*ECC_DECCON_REG16
);
799 printk("ECC_DECCNFG_REG32 :%x\n",*ECC_DECCNFG_REG32
);
800 printk("ECC_DECDIADDR_REG32 :%x\n",*ECC_DECDIADDR_REG32
);
801 printk("ECC_DECIDLE_REG16 :%x\n",*ECC_DECIDLE_REG16
);
802 printk("ECC_DECFER_REG16 :%x\n",*ECC_DECFER_REG16
);
803 printk("ECC_DECENUM0_REG32 :%x\n",*ECC_DECENUM0_REG32
);
804 printk("ECC_DECENUM1_REG32 :%x\n",*ECC_DECENUM1_REG32
);
805 printk("ECC_DECDONE_REG16 :%x\n",*ECC_DECDONE_REG16
);
806 printk("ECC_DECEL0_REG32 :%x\n",*ECC_DECEL0_REG32
);
807 printk("ECC_DECEL1_REG32 :%x\n",*ECC_DECEL1_REG32
);
808 printk("ECC_DECEL2_REG32 :%x\n",*ECC_DECEL2_REG32
);
809 printk("ECC_DECEL3_REG32 :%x\n",*ECC_DECEL3_REG32
);
810 printk("ECC_DECEL4_REG32 :%x\n",*ECC_DECEL4_REG32
);
811 printk("ECC_DECEL5_REG32 :%x\n",*ECC_DECEL5_REG32
);
812 printk("ECC_DECEL6_REG32 :%x\n",*ECC_DECEL6_REG32
);
813 printk("ECC_DECEL7_REG32 :%x\n",*ECC_DECEL7_REG32
);
814 printk("ECC_DECIRQEN_REG16 :%x\n",*ECC_DECIRQEN_REG16
);
815 printk("ECC_DECIRQSTA_REG16 :%x\n",*ECC_DECIRQSTA_REG16
);
816 printk("ECC_DECFSM_REG32 :%x\n",*ECC_DECFSM_REG32
);
817 printk("ECC_BYPASS_REG32 :%x\n",*ECC_BYPASS_REG32
);
818 printk("NFI clock : %s\n", (DRV_Reg32((volatile u32
*)(PERICFG_BASE
+0x18)) & (0x1)) ? "Clock Disabled" : "Clock Enabled");
819 printk("NFI clock SEL (MT8127):0x%x: %s\n",(PERICFG_BASE
+0x5C), (DRV_Reg32((volatile u32
*)(PERICFG_BASE
+0x5C)) & (0x1)) ? "Half clock" : "Quarter clock");
823 u8
NFI_DMA_status(void)
825 return g_running_dma
;
828 u32
NFI_DMA_address(void)
830 return DRV_Reg32(NFI_STRADDR_REG32
);
833 EXPORT_SYMBOL(NFI_DMA_status
);
834 EXPORT_SYMBOL(NFI_DMA_address
);
836 u32
nand_virt_to_phys_add(u32 va
)
838 u32 pageOffset
= (va
& (PAGE_SIZE
- 1));
844 if (virt_addr_valid(va
))
846 return __virt_to_phys(va
);
851 printk(KERN_ERR
"[nand_virt_to_phys_add] ERROR ,current is NULL! \n");
855 if (NULL
== current
->mm
)
857 printk(KERN_ERR
"[nand_virt_to_phys_add] ERROR current->mm is NULL! tgid=0x%x, name=%s \n", current
->tgid
, current
->comm
);
861 pgd
= pgd_offset(current
->mm
, va
); /* what is tsk->mm */
862 if (pgd_none(*pgd
) || pgd_bad(*pgd
))
864 printk(KERN_ERR
"[nand_virt_to_phys_add] ERROR, va=0x%x, pgd invalid! \n", va
);
868 pmd
= pmd_offset((pud_t
*)pgd
, va
);
869 if (pmd_none(*pmd
) || pmd_bad(*pmd
))
871 printk(KERN_ERR
"[nand_virt_to_phys_add] ERROR, va=0x%x, pmd invalid! \n", va
);
875 pte
= pte_offset_map(pmd
, va
);
876 if (pte_present(*pte
))
878 pa
= (pte_val(*pte
) & (PAGE_MASK
)) | pageOffset
;
882 printk(KERN_ERR
"[nand_virt_to_phys_add] ERROR va=0x%x, pte invalid! \n", va
);
886 EXPORT_SYMBOL(nand_virt_to_phys_add
);
888 bool get_device_info(u8
*id
, flashdev_info_t
*devinfo
)
893 for (i
= 0; i
<flash_number
; i
++){
895 for(m
=0;m
<gen_FlashTable_p
[i
].id_length
;m
++){
896 if(id
[m
]!=gen_FlashTable_p
[i
].id
[m
]){
901 if(mismatch
== 0 && gen_FlashTable_p
[i
].id_length
> target_id_len
){
903 target_id_len
=gen_FlashTable_p
[i
].id_length
;
908 MSG(INIT
, "Recognize NAND: ID [");
909 for(n
=0;n
<gen_FlashTable_p
[target
].id_length
;n
++){
910 devinfo
->id
[n
] = gen_FlashTable_p
[target
].id
[n
];
911 MSG(INIT
, "%x ",devinfo
->id
[n
]);
913 MSG(INIT
, "], Device Name [%s], Page Size [%d]B Spare Size [%d]B Total Size [%d]MB\n",gen_FlashTable_p
[target
].devciename
,gen_FlashTable_p
[target
].pagesize
,gen_FlashTable_p
[target
].sparesize
,gen_FlashTable_p
[target
].totalsize
);
914 devinfo
->id_length
=gen_FlashTable_p
[target
].id_length
;
915 devinfo
->blocksize
= gen_FlashTable_p
[target
].blocksize
;
916 devinfo
->addr_cycle
= gen_FlashTable_p
[target
].addr_cycle
;
917 devinfo
->iowidth
= gen_FlashTable_p
[target
].iowidth
;
918 devinfo
->timmingsetting
= gen_FlashTable_p
[target
].timmingsetting
;
919 devinfo
->advancedmode
= gen_FlashTable_p
[target
].advancedmode
;
920 devinfo
->pagesize
= gen_FlashTable_p
[target
].pagesize
;
921 devinfo
->sparesize
= gen_FlashTable_p
[target
].sparesize
;
922 devinfo
->totalsize
= gen_FlashTable_p
[target
].totalsize
;
923 devinfo
->sectorsize
= gen_FlashTable_p
[target
].sectorsize
;
924 devinfo
->s_acccon
= gen_FlashTable_p
[target
].s_acccon
;
925 devinfo
->s_acccon1
= gen_FlashTable_p
[target
].s_acccon1
;
926 devinfo
->freq
= gen_FlashTable_p
[target
].freq
;
927 devinfo
->vendor
= gen_FlashTable_p
[target
].vendor
;
928 //devinfo->ttarget = gen_FlashTable[target].ttarget;
929 memcpy((u8
*)&devinfo
->feature_set
, (u8
*)&gen_FlashTable_p
[target
].feature_set
, sizeof(struct MLC_feature_set
));
930 memcpy(devinfo
->devciename
, gen_FlashTable_p
[target
].devciename
, sizeof(devinfo
->devciename
));
933 MSG(INIT
, "Not Found NAND: ID [");
934 for(n
=0;n
<NAND_MAX_ID
;n
++){
935 MSG(INIT
, "%x ",id
[n
]);
941 #ifdef DUMP_NATIVE_BACKTRACE
942 #define NFI_NATIVE_LOG_SD "/sdcard/NFI_native_log_%s-%02d-%02d-%02d_%02d-%02d-%02d.log"
943 #define NFI_NATIVE_LOG_DATA "/data/NFI_native_log_%s-%02d-%02d-%02d_%02d-%02d-%02d.log"
944 static int nfi_flush_log(char *s
)
948 struct timeval tv
= { 0 };
949 struct file
*filp
= NULL
;
954 do_gettimeofday(&tv
);
955 rtc_time_to_tm(tv
.tv_sec
, &tm
);
956 memset(name
, 0, sizeof(name
));
957 sprintf(name
, NFI_NATIVE_LOG_DATA
, s
, tm
.tm_year
+ 1900, tm
.tm_mon
+ 1, tm
.tm_mday
, tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
);
961 filp
= filp_open(name
, O_WRONLY
| O_CREAT
, 0777);
964 printk("[NFI_flush_log]error create file in %s, IS_ERR:%ld, PTR_ERR:%ld\n", name
, IS_ERR(filp
), PTR_ERR(filp
));
965 memset(name
, 0, sizeof(name
));
966 sprintf(name
, NFI_NATIVE_LOG_SD
, s
, tm
.tm_year
+ 1900, tm
.tm_mon
+ 1, tm
.tm_mday
, tm
.tm_hour
, tm
.tm_min
, tm
.tm_sec
);
967 filp
= filp_open(name
, O_WRONLY
| O_CREAT
, 0777);
970 printk("[NFI_flush_log]error create file in %s, IS_ERR:%ld, PTR_ERR:%ld\n", name
, IS_ERR(filp
), PTR_ERR(filp
));
975 printk("[NFI_flush_log]log file:%s\n", name
);
978 if (!(filp
->f_op
) || !(filp
->f_op
->write
))
980 printk("[NFI_flush_log] No operation\n");
988 data_write
= vfs_write(filp
, (char __user
*)NativeInfo
, strlen(NativeInfo
), &filp
->f_pos
);
991 printk("[nfi_flush_log] write fail\n");
999 filp_close(filp
, current
->files
);
1005 //extern bool MLC_DEVICE;
1006 static bool mtk_nand_reset(void);
1007 extern u64
part_get_startaddress(u64 byte_address
,u32
* idx
);
1008 extern bool raw_partition(u32 index
);
1009 u32
mtk_nand_page_transform(struct mtd_info
*mtd
, struct nand_chip
*chip
, u32 page
, u32
* blk
, u32
* map_blk
)
1011 u32 block_size
= 1 <<(chip
->phys_erase_shift
);
1012 u32 page_size
= (1<<chip
->page_shift
);
1013 loff_t start_address
;
1018 bool translate
= FALSE
;
1019 loff_t logical_address
= (loff_t
)page
*(1<<chip
->page_shift
);
1020 //MSG(INIT , "[BEAN]%d, %x\n",page,logical_address);
1023 start_address
= part_get_startaddress(logical_address
,&idx
);
1024 //MSG(INIT , "[start_address]page = 0x%x, start_address=0x%lx\n",page,start_address);
1025 if(raw_partition(idx
))
1030 if(translate
== TRUE
)
1032 block
= (u32
)((u32
)(start_address
>> chip
->phys_erase_shift
) + (u32
)((logical_address
-start_address
) >> (chip
->phys_erase_shift
-1)));
1033 page_in_block
= ((u32
)((logical_address
-start_address
) >> chip
->page_shift
) % ((mtd
->erasesize
/page_size
)/2));
1034 //MSG(INIT , "[LOW]0x%x, 0x%x\n",block,page_in_block);
1036 if(devinfo
.vendor
!= VEND_NONE
)
1038 //page_in_block = devinfo.feature_set.PairPage[page_in_block];
1039 page_in_block
= functArray
[devinfo
.feature_set
.ptbl_idx
](page_in_block
);
1042 mapped_block
= get_mapping_block_index(block
);
1044 //MSG(INIT , "[page_in_block]mapped_block=%d, page_in_block=%d\n",mapped_block,page_in_block);
1046 *map_blk
= mapped_block
;
1047 return page_in_block
;
1051 block
= page
/(block_size
/page_size
);
1052 mapped_block
= get_mapping_block_index(block
);
1053 page_in_block
= page
% (block_size
/page_size
);
1054 //MSG(INIT , "[FULL]0x%x, 0x%x 0x%x 0x%x\n",block,page_in_block,mapped_block, page_in_block+mapped_block*(block_size/page_size));
1056 *map_blk
= mapped_block
;
1057 return page_in_block
;
1061 bool mtk_nand_IsRawPartition(loff_t logical_address
)
1064 part_get_startaddress(logical_address
,&idx
);
1065 if(raw_partition(idx
))
1075 static int mtk_nand_interface_config(struct mtd_info
*mtd
)
1080 struct gFeatureSet
*feature_set
= &(devinfo
.feature_set
.FeatureSet
);
1081 //int clksrc = ARMPLL;
1082 if(devinfo
.iowidth
== IO_ONFI
|| devinfo
.iowidth
==IO_TOGGLEDDR
|| devinfo
.iowidth
==IO_TOGGLESDR
)
1084 nand_enable_clock();
1085 //0:26M 1:182M 2:156M 3:124.8M 4:91M 5:62.4M 6:39M 7:26M
1086 if(devinfo
.freq
== 80) // mode 4
1088 g_iNFI2X_CLKSRC
= MSDCPLL
; // 156M
1089 }else if(devinfo
.freq
== 100) // mode 5
1091 g_iNFI2X_CLKSRC
= MAINPLL
; //182M
1094 //printk("[Bean]mode:%d\n", g_iNFI2X_CLKSRC);
1095 NFI_ISSUE_COMMAND (NAND_CMD_RESET
, 0, 0, 0, 0);
1096 timeout
= TIMEOUT_4
;
1101 //printk("[Interface Config]cmd:0x%X addr:0x%x feature:0x%x\n",
1102 //feature_set->sfeatureCmd, feature_set->Interface.address, feature_set->Interface.feature);
1104 //mtk_nand_GetFeature(mtd, feature_set->gfeatureCmd, \
1105 //feature_set->Interface.address, &val,4);
1106 //printk("[Interface]0x%X\n", val);
1107 mtk_nand_SetFeature(mtd
, (u16
) feature_set
->sfeatureCmd
, \
1108 feature_set
->Interface
.address
, (u8
*)&feature_set
->Interface
.feature
,\
1109 sizeof(feature_set
->Interface
.feature
));
1111 NFI_CLN_REG32(NFI_DEBUG_CON1_REG16
,HWDCM_SWCON_ON
);
1115 NFI_CLN_REG32(NFI_DEBUG_CON1_REG16
,NFI_BYPASS
);
1116 //clear bypass of ecc
1118 NFI_CLN_REG32(ECC_BYPASS_REG32
,ECC_BYPASS
);
1120 DRV_WriteReg32(PERICFG_BASE
+0x5C, 0x0); // setting default AHB clock
1121 //MSG(INIT, "AHB Clock(0x%x)\n",DRV_Reg32(PERICFG_BASE+0x5C));
1123 NFI_SET_REG32(PERI_NFI_CLK_SOURCE_SEL
, NFI_PAD_1X_CLOCK
);
1125 clkmux_sel(MT_MUX_NFI2X
,g_iNFI2X_CLKSRC
,"NFI");
1127 DRV_WriteReg32(NFI_DLYCTRL_REG32
, 0x4001);
1128 DRV_WriteReg32(PERI_NFI_MAC_CTRL
, 0x10006);
1129 while(0 == (DRV_Reg32(NFI_STA_REG32
) && STA_FLASH_MACRO_IDLE
));
1130 if(devinfo
.iowidth
== IO_ONFI
)
1131 DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32
, 2); //ONFI
1133 DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32
, 1); //Toggle
1134 //printk("[Timing]0x%x 0x%x\n", devinfo.s_acccon, devinfo.s_acccon1);
1135 acccon1
= DRV_Reg32(NFI_ACCCON1_REG3
);
1136 DRV_WriteReg32(NFI_ACCCON1_REG3
,devinfo
.s_acccon1
);
1137 DRV_WriteReg32(NFI_ACCCON_REG32
,devinfo
.s_acccon
);
1139 mtk_nand_GetFeature(mtd
, feature_set
->gfeatureCmd
, \
1140 feature_set
->Interface
.address
, (u8
*)&val
,4);
1141 //printk("[Bean]feature is %x\n", val);
1142 if((val
&0xFF) != (feature_set
->Interface
.feature
& 0xFF))
1144 MSG(INIT
, "[%s] fail 0x%X\n",__FUNCTION__
,val
);
1145 NFI_ISSUE_COMMAND (NAND_CMD_RESET
, 0, 0, 0, 0); //ASYNC
1146 timeout
= TIMEOUT_4
;
1150 clkmux_sel(MT_MUX_NFI2X
, MAINPLL
, "NFI"); // 182M
1151 NFI_SET_REG32(NFI_DEBUG_CON1_REG16
,NFI_BYPASS
);
1152 NFI_SET_REG32(ECC_BYPASS_REG32
,ECC_BYPASS
);
1153 NFI_CLN_REG32(PERI_NFI_CLK_SOURCE_SEL
, NFI_PAD_1X_CLOCK
);
1154 DRV_WriteReg32(PERICFG_BASE
+0x5C, 0x1); // setting AHB clock
1155 //MSG(INIT, "AHB Clock(0x%x)\n",DRV_Reg32(PERICFG_BASE+0x5C));
1156 DRV_WriteReg32(NFI_ACCCON1_REG3
,acccon1
);
1157 DRV_WriteReg32(NFI_ACCCON_REG32
,devinfo
.timmingsetting
);
1158 DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32
, 0); //Legacy
1159 g_bSyncOrToggle
= false;
1162 g_bSyncOrToggle
= true;
1164 MSG(INIT
, "[%s] success 0x%X\n",__FUNCTION__
, devinfo
.iowidth
);
1165 //extern void log_boot(char *str);
1166 //log_boot("[Bean]sync mode success!");
1170 g_bSyncOrToggle
= false;
1171 MSG(INIT
, "[%s] legacy interface \n",__FUNCTION__
);
/* Enable the NFI HW randomizer for the given page: zero the encode/decode
 * seed-override registers, select a per-page seed from SS_RANDOM_SEED
 * (indexed by page modulo PAGES_PER_BLOCK or SS_SEED_NUM, whichever is
 * smaller), and program seed-reload per page or per sector via fgPage.
 * NOTE(review): this extract has dropped lines (braces/#if and the return);
 * the numbers embedded in the text are original file line numbers. */
1179 static int mtk_nand_turn_on_randomizer(u32 page
, int type
, int fgPage
)
1182 u32 u4NFI_RAN_CFG
= 0;
1183 u4NFI_CFG
= DRV_Reg32(NFI_CNFG_REG16
);
1185 DRV_WriteReg32(NFI_ENMPTY_THRESH_REG32
, 40); // empty threshold 40
// Clear all six encode-seed override registers (use seed from CNFG instead).
1189 DRV_WriteReg32(NFI_RANDOM_ENSEED01_TS_REG32
, 0);
1190 DRV_WriteReg32(NFI_RANDOM_ENSEED02_TS_REG32
, 0);
1191 DRV_WriteReg32(NFI_RANDOM_ENSEED03_TS_REG32
, 0);
1192 DRV_WriteReg32(NFI_RANDOM_ENSEED04_TS_REG32
, 0);
1193 DRV_WriteReg32(NFI_RANDOM_ENSEED05_TS_REG32
, 0);
1194 DRV_WriteReg32(NFI_RANDOM_ENSEED06_TS_REG32
, 0);
// Clear all six decode-seed override registers as well.
1198 DRV_WriteReg32(NFI_RANDOM_DESEED01_TS_REG32
, 0);
1199 DRV_WriteReg32(NFI_RANDOM_DESEED02_TS_REG32
, 0);
1200 DRV_WriteReg32(NFI_RANDOM_DESEED03_TS_REG32
, 0);
1201 DRV_WriteReg32(NFI_RANDOM_DESEED04_TS_REG32
, 0);
1202 DRV_WriteReg32(NFI_RANDOM_DESEED05_TS_REG32
, 0);
1203 DRV_WriteReg32(NFI_RANDOM_DESEED06_TS_REG32
, 0);
1205 u4NFI_CFG
|= CNFG_RAN_SEL
;
// Seed table is indexed by page offset within the block when the block is
// small enough; otherwise wrap at the seed-table size.
1206 if(PAGES_PER_BLOCK
<= SS_SEED_NUM
)
1210 u4NFI_RAN_CFG
|= RAN_CNFG_ENCODE_SEED(SS_RANDOM_SEED
[page
& (PAGES_PER_BLOCK
-1)]) | RAN_CNFG_ENCODE_EN
;
1214 u4NFI_RAN_CFG
|= RAN_CNFG_DECODE_SEED(SS_RANDOM_SEED
[page
& (PAGES_PER_BLOCK
-1)]) | RAN_CNFG_DECODE_EN
;
1221 u4NFI_RAN_CFG
|= RAN_CNFG_ENCODE_SEED(SS_RANDOM_SEED
[page
& (SS_SEED_NUM
-1)]) | RAN_CNFG_ENCODE_EN
;
1225 u4NFI_RAN_CFG
|= RAN_CNFG_DECODE_SEED(SS_RANDOM_SEED
[page
& (SS_SEED_NUM
-1)]) | RAN_CNFG_DECODE_EN
;
1230 if(fgPage
) //reload seed for each page
1231 u4NFI_CFG
&= ~CNFG_RAN_SEC
;
1232 else //reload seed for each sector
1233 u4NFI_CFG
|= CNFG_RAN_SEC
;
1235 DRV_WriteReg32(NFI_CNFG_REG16
, u4NFI_CFG
);
1236 DRV_WriteReg32(NFI_RANDOM_CNFG_REG32
, u4NFI_RAN_CFG
);
1237 //MSG(INIT, "[K]ran turn on type:%d 0x%x 0x%x\n", type, DRV_Reg32(NFI_RANDOM_CNFG_REG32), page);
/* Report whether the HW randomizer is currently enabled, i.e. whether the
 * encode or decode enable bit is set in NFI_RANDOM_CNFG.
 * NOTE(review): the return statements (orig lines 1246-1250) were lost in
 * this extract. */
1241 static bool mtk_nand_israndomizeron(void)
1243 u32 nfi_ran_cnfg
= 0;
1244 nfi_ran_cnfg
= DRV_Reg32(NFI_RANDOM_CNFG_REG32
);
1245 if(nfi_ran_cnfg
&(RAN_CNFG_ENCODE_EN
| RAN_CNFG_DECODE_EN
))
1251 static void mtk_nand_turn_off_randomizer(void)
1253 u32 u4NFI_CFG
= DRV_Reg32(NFI_CNFG_REG16
);
1254 u4NFI_CFG
&= ~CNFG_RAN_SEL
;
1255 u4NFI_CFG
&= ~CNFG_RAN_SEC
;
1256 DRV_WriteReg32(NFI_RANDOM_CNFG_REG32
, 0);
1257 DRV_WriteReg32(NFI_CNFG_REG16
, u4NFI_CFG
);
1258 //MSG(INIT, "[K]ran turn off\n");
/* Randomizer support compiled out: stub the randomizer API as no-ops so
 * callers need no #ifdefs. */
1261 #define mtk_nand_israndomizeron() (FALSE)
1262 #define mtk_nand_turn_on_randomizer(page, type, fgPage)
1263 #define mtk_nand_turn_off_randomizer()
1267 /******************************************************************************
1268 * mtk_nand_irq_handler
1271 * NAND interrupt handler!
1278 * IRQ_HANDLED : Successfully handle the IRQ
1283 ******************************************************************************/
1284 /* Modified for TCM used */
/* NFI interrupt handler: on AHB-DMA-done, wake the waiter blocked in the
 * DMA read/write paths via g_comp_AHB_Done.
 * NOTE(review): the IRQ status clear / return statement (orig lines after
 * 1292) were lost in this extract. */
1285 static irqreturn_t
mtk_nand_irq_handler(int irqno
, void *dev_id
)
1287 u16 u16IntStatus
= DRV_Reg16(NFI_INTR_REG16
);
1290 if (u16IntStatus
& (u16
) INTR_AHB_DONE_EN
)
1292 complete(&g_comp_AHB_Done
);
1297 /******************************************************************************
1304 * struct mtk_nand_host_hw *hw
1312 ******************************************************************************/
/* Program the BCH ECC engine for the requested correction strength:
 * map ecc_bit to an ECC_CNFG_ECCxx code, halt decoder and encoder, then
 * write the decode/encode configuration (message size in bits plus parity).
 * NOTE(review): the switch/case labels selecting among the ECC_CNFG_ECCxx
 * assignments were lost in this extract; only the assignments remain. */
1313 static void ECC_Config(struct mtk_nand_host_hw
*hw
,u32 ecc_bit
)
1317 u32 ecc_bit_cfg
= ECC_CNFG_ECC4
;
1320 #ifndef MTK_COMBO_NAND_SUPPORT
1322 ecc_bit_cfg
= ECC_CNFG_ECC4
;
1325 ecc_bit_cfg
= ECC_CNFG_ECC8
;
1328 ecc_bit_cfg
= ECC_CNFG_ECC10
;
1331 ecc_bit_cfg
= ECC_CNFG_ECC12
;
1334 ecc_bit_cfg
= ECC_CNFG_ECC14
;
1337 ecc_bit_cfg
= ECC_CNFG_ECC16
;
1340 ecc_bit_cfg
= ECC_CNFG_ECC18
;
1343 ecc_bit_cfg
= ECC_CNFG_ECC20
;
1346 ecc_bit_cfg
= ECC_CNFG_ECC22
;
1349 ecc_bit_cfg
= ECC_CNFG_ECC24
;
1353 ecc_bit_cfg
= ECC_CNFG_ECC28
;
1356 ecc_bit_cfg
= ECC_CNFG_ECC32
;
1359 ecc_bit_cfg
= ECC_CNFG_ECC36
;
1362 ecc_bit_cfg
= ECC_CNFG_ECC40
;
1365 ecc_bit_cfg
= ECC_CNFG_ECC44
;
1368 ecc_bit_cfg
= ECC_CNFG_ECC48
;
1371 ecc_bit_cfg
= ECC_CNFG_ECC52
;
1374 ecc_bit_cfg
= ECC_CNFG_ECC56
;
1377 ecc_bit_cfg
= ECC_CNFG_ECC60
;
// Disable the decoder, then busy-wait until it reports idle.
1383 DRV_WriteReg16(ECC_DECCON_REG16
, DEC_DE
);
1387 while (!DRV_Reg16(ECC_DECIDLE_REG16
));
// Disable the encoder, then busy-wait until it reports idle.
1389 DRV_WriteReg16(ECC_ENCCON_REG16
, ENC_DE
);
1393 while (!DRV_Reg32(ECC_ENCIDLE_REG32
));
1395 /* setup FDM register base */
1396 // DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32);
// Encode message size in bits: sector data plus 8 bytes of FDM.
1399 u4ENCODESize
= (hw
->nand_sec_size
+ 8) << 3;
1400 /* Sector + FDM + YAFFS2 meta data bits */
1401 u4DECODESize
= ((hw
->nand_sec_size
+ 8) << 3) + ecc_bit
* ECC_PARITY_BIT
;
1403 /* configure ECC decoder && encoder */
1404 DRV_WriteReg32(ECC_DECCNFG_REG32
, ecc_bit_cfg
| DEC_CNFG_NFI
| DEC_CNFG_EMPTY_EN
| (u4DECODESize
<< DEC_CNFG_CODE_SHIFT
));
1406 DRV_WriteReg32(ECC_ENCCNFG_REG32
, ecc_bit_cfg
| ENC_CNFG_NFI
| (u4ENCODESize
<< ENC_CNFG_MSG_SHIFT
));
1407 #ifndef MANUAL_CORRECT
// HW auto-correction path; otherwise report error locations only.
1408 NFI_SET_REG32(ECC_DECCNFG_REG32
, DEC_CNFG_CORRECT
);
1410 NFI_SET_REG32(ECC_DECCNFG_REG32
, DEC_CNFG_EL
);
1414 /******************************************************************************
1418 * HW ECC Decode Start !
1429 ******************************************************************************/
1430 static void ECC_Decode_Start(void)
1432 /* wait for device returning idle */
1433 while (!(DRV_Reg16(ECC_DECIDLE_REG16
) & DEC_IDLE
)) ;
1434 DRV_WriteReg16(ECC_DECCON_REG16
, DEC_EN
);
1437 /******************************************************************************
1441 * HW ECC Decode End !
1452 ******************************************************************************/
1453 static void ECC_Decode_End(void)
1455 /* wait for device returning idle */
1456 while (!(DRV_Reg16(ECC_DECIDLE_REG16
) & DEC_IDLE
)) ;
1457 DRV_WriteReg16(ECC_DECCON_REG16
, DEC_DE
);
1460 /******************************************************************************
1464 * HW ECC Encode Start !
1475 ******************************************************************************/
/* Kick off HW BCH encoding: poll the (32-bit) encoder idle register, then
 * enable the encoder.
 * NOTE(review): one original line (1480) between the poll and the write was
 * lost in this extract — possibly a memory barrier; confirm against the
 * original source. */
1476 static void ECC_Encode_Start(void)
1478 /* wait for device returning idle */
1479 while (!(DRV_Reg32(ECC_ENCIDLE_REG32
) & ENC_IDLE
)) ;
1481 DRV_WriteReg16(ECC_ENCCON_REG16
, ENC_EN
);
1484 /******************************************************************************
1488 * HW ECC Encode End !
1499 ******************************************************************************/
/* Finish HW BCH encoding: poll the encoder idle register, then disable the
 * encoder (write ENC_DE).
 * NOTE(review): one original line (1504) between the poll and the write was
 * lost in this extract — possibly a memory barrier; confirm against the
 * original source. */
1500 static void ECC_Encode_End(void)
1502 /* wait for device returning idle */
1503 while (!(DRV_Reg32(ECC_ENCIDLE_REG32
) & ENC_IDLE
)) ;
1505 DRV_WriteReg16(ECC_ENCCON_REG16
, ENC_DE
);
/* Decide whether a page is erased (empty) by scanning the spare/FDM bytes
 * (8 per sector) for any value other than 0xFF, then re-checking the OOB
 * index bytes. Logs the verdict.
 * NOTE(review): the early-exit/return lines and the #ifdef structure around
 * the two OOB-index checks were lost in this extract. */
1508 static bool is_empty_page(u8
* spare_buf
, u32 sec_num
){
1512 for(i
=0;i
<sec_num
*8;i
++){
1513 if(spare_buf
[i
]!=0xFF){
1520 for(i
=0;i
<OOB_INDEX_SIZE
;i
++){
1521 //xlog_printk(ANDROID_LOG_INFO,"NFI", "flag byte: %x ",spare_buf[OOB_INDEX_OFFSET+i] );
1525 if(spare_buf
[13+i
] !=0xFF){
1531 if(spare_buf
[OOB_INDEX_OFFSET
+i
] !=0xFF){
1540 xlog_printk(ANDROID_LOG_INFO
,"NFI", "This page is %s!\n",is_empty
?"empty":"occupied");
/* For a nominally-empty page that still shows bit flips, decide whether a
 * faked all-0xFF buffer may be returned: per sector, popcount the non-FF
 * bits (the three t= lines are a parallel bit-count), log each flip, and
 * refuse (too many flipped sectors) once sec_zero_count exceeds 2.
 * NOTE(review): loop-body braces, the counting of sec_zero_count, and the
 * return statements were lost in this extract. */
1543 static bool return_fake_buf(u8
* data_buf
, u32 page_size
, u32 sec_num
,u32 u4PageAddr
){
1545 u32 sec_zero_count
=0;
1549 for(j
=0;j
<sec_num
;j
++){
1550 p
=data_buf
+j
*host
->hw
->nand_sec_size
;
1552 for(i
=0;i
<host
->hw
->nand_sec_size
;i
++){
// Parallel popcount of byte t: pairs, nibbles, then bytes.
1555 t
=((t
&0xaa)>>1) + (t
&0x55);
1556 t
=((t
&0xcc)>>2)+(t
&0x33);
1557 t
=((t
&0xf0f0)>>4)+(t
&0x0f0f);
1560 xlog_printk(ANDROID_LOG_INFO
,"NFI", "there is %d bit filp at sector(%d): %d in empty page \n ",t
,j
,i
);
1563 if(sec_zero_count
> 2){
1564 xlog_printk(ANDROID_LOG_ERROR
,"NFI","too many bit filp=%d @ page addr=0x%x, we can not return fake buf\n",sec_zero_count
,u4PageAddr
);
1571 /******************************************************************************
1572 * mtk_nand_check_bch_error
1575 * Check BCH error or not !
1578 * struct mtd_info *mtd
1589 ******************************************************************************/
/* After a HW-ECC read, check the BCH decoder result for sectors 0..u4SecIndex:
 * wait for the per-sector DECDONE bit, read the per-sector error counts from
 * ECC_DECENUM0/1 (ERR_NUM0 == uncorrectable), update mtd->ecc_stats, and on
 * an empty page return all-0xFF data/spare. The MANUAL_CORRECT variant reads
 * error bit locations from ECC_DECEL* and flips the bits in pDataBuf itself.
 * NOTE(review): many structural lines (braces, else branches, returns, the
 * timeout handling of the DECDONE poll) were lost in this extract. */
1590 static bool mtk_nand_check_bch_error(struct mtd_info
*mtd
, u8
* pDataBuf
,u8
* spareBuf
,u32 u4SecIndex
, u32 u4PageAddr
, u32
* bitmap
)
1593 u16 u2SectorDoneMask
= 1 << u4SecIndex
;
1594 u32 u4ErrorNumDebug0
,u4ErrorNumDebug1
, i
, u4ErrNum
;
1595 u32 timeout
= 0xFFFF;
1596 u32 correct_count
= 0;
1597 u32 page_size
=(u4SecIndex
+1)*host
->hw
->nand_sec_size
;
1598 u32 sec_num
=u4SecIndex
+1;
1599 //u32 bitflips = sec_num * 39;
1601 u32 maxSectorBitErr
= 0;
1603 #ifdef MANUAL_CORRECT
1604 u32 au4ErrBitLoc
[6];
1605 u32 u4ErrByteLoc
, u4BitOffset
;
1606 u32 u4ErrBitLoc1th
, u4ErrBitLoc2nd
;
// Wait until the decoder signals completion for the last sector of interest.
1609 while (0 == (u2SectorDoneMask
& DRV_Reg16(ECC_DECDONE_REG16
)))
1617 #ifndef MANUAL_CORRECT
1618 if(0 == (DRV_Reg32(NFI_STA_REG32
) & STA_READ_EMPTY
))
1620 u4ErrorNumDebug0
= DRV_Reg32(ECC_DECENUM0_REG32
);
1621 u4ErrorNumDebug1
= DRV_Reg32(ECC_DECENUM1_REG32
);
1622 if (0 != (u4ErrorNumDebug0
& 0xFFFFFFFF) || 0 != (u4ErrorNumDebug1
& 0xFFFFFFFF))
1624 for (i
= 0; i
<= u4SecIndex
; ++i
)
// Each DECENUM register packs four sectors, 8 bits of error count each.
1627 u4ErrNum
= (DRV_Reg32((ECC_DECENUM0_REG32
+(i
/4)))>>((i
%4)*8))& ERR_NUM0
;
1631 u4ErrNum
= DRV_Reg32(ECC_DECENUM0_REG32
) >> (i
* 8);
1634 u4ErrNum
= DRV_Reg32(ECC_DECENUM1_REG32
) >> ((i
- 4) * 8);
1636 u4ErrNum
&= ERR_NUM0
;
// ERR_NUM0 as the count value means the sector is uncorrectable.
1638 if (ERR_NUM0
== u4ErrNum
)
1642 //xlog_printk(ANDROID_LOG_WARN,"NFI", "UnCorrectable ECC errors at PageAddr=%d, Sector=%d\n", u4PageAddr, i);
1643 MSG(INIT
,"UnCorrectable ECC errors at PageAddr=%d, Sector=%d\n", u4PageAddr
, i
);
1650 if(maxSectorBitErr
< u4ErrNum
)
1651 maxSectorBitErr
= u4ErrNum
;
1652 correct_count
+= u4ErrNum
;
1653 // xlog_printk(ANDROID_LOG_INFO,"NFI"," In kernel Correct %d ECC error(s) at PageAddr=%d, Sector=%d\n", u4ErrNum, u4PageAddr, i);
1657 mtd
->ecc_stats
.failed
+=failed_sec
;
// Count the page as "corrected" only when flips exceed the refresh threshold.
1658 if ((maxSectorBitErr
> ecc_threshold
) && (FALSE
!= ret
))
1660 MSG(INIT
,"ECC bit flips (0x%x) exceed eccthreshold (0x%x),u4PageAddr 0x%x\n",maxSectorBitErr
,ecc_threshold
,u4PageAddr
);
1661 mtd
->ecc_stats
.corrected
++;
1664 //xlog_printk(ANDROID_LOG_INFO,"NFI", "Less than 39 bit error, ignore\n");
// Empty-page path: hand back all-0xFF data and spare.
1669 if(0 != (DRV_Reg32(NFI_STA_REG32
) & STA_READ_EMPTY
))
1672 //MSG(INIT, "empty page, empty buffer returned\n");
1673 memset(pDataBuf
,0xff,page_size
);
1674 memset(spareBuf
,0xff,sec_num
*8);
1675 maxSectorBitErr
= 0;
1680 /* We will manually correct the error bits in the last sector, not all the sectors of the page! */
1681 memset(au4ErrBitLoc
, 0x0, sizeof(au4ErrBitLoc
));
1682 u4ErrorNumDebug
= DRV_Reg32(ECC_DECENUM_REG32
);
1683 u4ErrNum
= (DRV_Reg32((ECC_DECENUM_REG32
+(u4SecIndex
/4)))>>((u4SecIndex
%4)*8))& ERR_NUM0
;
1687 if (ERR_NUM0
== u4ErrNum
)
1689 mtd
->ecc_stats
.failed
++;
1691 //printk(KERN_ERR"UnCorrectable at PageAddr=%d\n", u4PageAddr);
// Each ECC_DECEL word packs two 14-bit error-bit locations.
1694 for (i
= 0; i
< ((u4ErrNum
+ 1) >> 1); ++i
)
1696 au4ErrBitLoc
[i
] = DRV_Reg32(ECC_DECEL0_REG32
+ i
);
1697 u4ErrBitLoc1th
= au4ErrBitLoc
[i
] & 0x3FFF;
1699 if (u4ErrBitLoc1th
< 0x1000)
1701 u4ErrByteLoc
= u4ErrBitLoc1th
/ 8;
1702 u4BitOffset
= u4ErrBitLoc1th
% 8;
1703 pDataBuf
[u4ErrByteLoc
] = pDataBuf
[u4ErrByteLoc
] ^ (1 << u4BitOffset
);
1704 mtd
->ecc_stats
.corrected
++;
1707 mtd
->ecc_stats
.failed
++;
1708 //printk(KERN_ERR"UnCorrectable ErrLoc=%d\n", au4ErrBitLoc[i]);
1710 u4ErrBitLoc2nd
= (au4ErrBitLoc
[i
] >> 16) & 0x3FFF;
1711 if (0 != u4ErrBitLoc2nd
)
1713 if (u4ErrBitLoc2nd
< 0x1000)
1715 u4ErrByteLoc
= u4ErrBitLoc2nd
/ 8;
1716 u4BitOffset
= u4ErrBitLoc2nd
% 8;
1717 pDataBuf
[u4ErrByteLoc
] = pDataBuf
[u4ErrByteLoc
] ^ (1 << u4BitOffset
);
1718 mtd
->ecc_stats
.corrected
++;
1721 mtd
->ecc_stats
.failed
++;
1722 //printk(KERN_ERR"UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]);
1727 if (0 == (DRV_Reg16(ECC_DECFER_REG16
) & (1 << u4SecIndex
)))
1736 /******************************************************************************
1737 * mtk_nand_RFIFOValidSize
1740 * Check the Read FIFO data bytes !
1751 ******************************************************************************/
/* Poll until the read FIFO holds at least u2Size bytes.
 * NOTE(review): the timeout decrement and return statements (orig lines
 * 1756-1765) were lost in this extract. */
1752 static bool mtk_nand_RFIFOValidSize(u16 u2Size
)
1754 u32 timeout
= 0xFFFF;
1755 while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16
)) < u2Size
)
1766 /******************************************************************************
1767 * mtk_nand_WFIFOValidSize
1770 * Check the Write FIFO data bytes !
1781 ******************************************************************************/
/* Poll until the write FIFO has drained to at most u2Size bytes.
 * NOTE(review): the timeout decrement and return statements were lost in
 * this extract. */
1782 static bool mtk_nand_WFIFOValidSize(u16 u2Size
)
1784 u32 timeout
= 0xFFFF;
1785 while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16
)) > u2Size
)
1796 /******************************************************************************
1797 * mtk_nand_status_ready
1800 * Indicate the NAND device is ready or not !
1811 ******************************************************************************/
/* Poll NFI_STA until all bits in u4Status have cleared (controller state
 * machine idle for that operation).
 * NOTE(review): the timeout decrement and return statements were lost in
 * this extract. */
1812 static bool mtk_nand_status_ready(u32 u4Status
)
1814 u32 timeout
= 0xFFFF;
1815 while ((DRV_Reg32(NFI_STA_REG32
) & u4Status
) != 0)
1826 /******************************************************************************
1830 * Reset the NAND device hardware component !
1833 * struct mtk_nand_host *host (Initial setting data)
1841 ******************************************************************************/
/* Reset the NFI controller per the HW-recommended flow: if the bus master
 * is busy, issue FIFO-flush + reset and wait for the master to go idle
 * (logging on timeout), then issue the reset again and confirm the state
 * machine and both FIFOs are empty.
 * NOTE(review): braces and the timeout-decrement lines inside the wait loop
 * were lost in this extract. */
1842 static bool mtk_nand_reset(void)
1844 // HW recommended reset flow
1845 int timeout
= 0xFFFF;
1846 if (DRV_Reg16(NFI_MASTERSTA_REG16
) & 0xFFF) // master is busy
1849 DRV_WriteReg32(NFI_CON_REG16
, CON_FIFO_FLUSH
| CON_NFI_RST
);
1850 while (DRV_Reg16(NFI_MASTERSTA_REG16
) & 0xFFF)
1855 MSG(INIT
, "Wait for NFI_MASTERSTA timeout\n");
1859 /* issue reset operation */
1861 DRV_WriteReg32(NFI_CON_REG16
, CON_FIFO_FLUSH
| CON_NFI_RST
);
1863 return mtk_nand_status_ready(STA_NFI_FSM_MASK
| STA_NAND_BUSY
) && mtk_nand_RFIFOValidSize(0) && mtk_nand_WFIFOValidSize(0);
1866 /******************************************************************************
1870 * Set the oepration mode !
1873 * u16 u2OpMode (read/write)
1881 ******************************************************************************/
/* Set the NFI operation mode field of NFI_CNFG (read/program/erase/...),
 * preserving the other bits.
 * NOTE(review): the line ORing u2OpMode into u2Mode (orig line 1886) was
 * lost in this extract — confirm against the original source. */
1882 static void mtk_nand_set_mode(u16 u2OpMode
)
1884 u16 u2Mode
= DRV_Reg16(NFI_CNFG_REG16
);
1885 u2Mode
&= ~CNFG_OP_MODE_MASK
;
1887 DRV_WriteReg16(NFI_CNFG_REG16
, u2Mode
);
1890 /******************************************************************************
1891 * mtk_nand_set_autoformat
1894 * Enable/Disable hardware autoformat !
1897 * bool bEnable (Enable/Disable)
1905 ******************************************************************************/
1906 static void mtk_nand_set_autoformat(bool bEnable
)
1910 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_AUTO_FMT_EN
);
1913 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_AUTO_FMT_EN
);
1917 /******************************************************************************
1918 * mtk_nand_configure_fdm
1921 * Configure the FDM data size !
1932 ******************************************************************************/
1933 static void mtk_nand_configure_fdm(u16 u2FDMSize
)
1935 NFI_CLN_REG16(NFI_PAGEFMT_REG16
, PAGEFMT_FDM_MASK
| PAGEFMT_FDM_ECC_MASK
);
1936 NFI_SET_REG16(NFI_PAGEFMT_REG16
, u2FDMSize
<< PAGEFMT_FDM_SHIFT
);
1937 NFI_SET_REG16(NFI_PAGEFMT_REG16
, u2FDMSize
<< PAGEFMT_FDM_ECC_SHIFT
);
/* Poll the PIO data-ready bit before each PIO register access; logs on
 * timeout.
 * NOTE(review): the timeout decrement and return statements were lost in
 * this extract. */
1941 static bool mtk_nand_pio_ready(void)
1944 while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16
) & 1))
1949 printk("PIO_DIRDY timeout\n");
1957 /******************************************************************************
1958 * mtk_nand_set_command
1961 * Send hardware commands to NAND devices !
1972 ******************************************************************************/
/* Latch a command byte into the NFI command register and wait for the
 * command state machine to finish.
 * NOTE(review): one original line (1976) before the register write was lost
 * in this extract — possibly a memory barrier; confirm against the original
 * source. */
1973 static bool mtk_nand_set_command(u16 command
)
1975 /* Write command to device */
1977 DRV_WriteReg16(NFI_CMD_REG16
, command
);
1978 return mtk_nand_status_ready(STA_CMD_STATE
);
1981 /******************************************************************************
1982 * mtk_nand_set_address
1985 * Set the hardware address register !
1988 * struct nand_chip *nand, u32 u4RowAddr
1996 ******************************************************************************/
/* Program the column/row address cycles: write both address registers, set
 * the number-of-bytes fields, and wait for the address state machine to
 * finish.
 * NOTE(review): one original line (2000) before the register writes was lost
 * in this extract — possibly a memory barrier; confirm against the original
 * source. */
1997 static bool mtk_nand_set_address(u32 u4ColAddr
, u32 u4RowAddr
, u16 u2ColNOB
, u16 u2RowNOB
)
1999 /* fill cycle addr */
2001 DRV_WriteReg32(NFI_COLADDR_REG32
, u4ColAddr
);
2002 DRV_WriteReg32(NFI_ROWADDR_REG32
, u4RowAddr
);
2003 DRV_WriteReg16(NFI_ADDRNOB_REG16
, u2ColNOB
| (u2RowNOB
<< ADDR_ROW_NOB_SHIFT
));
2004 return mtk_nand_status_ready(STA_ADDR_STATE
);
2007 //-------------------------------------------------------------------------------
/* Issue a NAND device RESET (0xFF): select reset op mode, send the command,
 * then poll NFI_STA for the busy-return indication with a bounded timeout.
 * NOTE(review): the return statement and the timeout-expired handling were
 * lost in this extract. */
2008 static bool mtk_nand_device_reset(void)
2010 u32 timeout
= 0xFFFF;
2014 DRV_WriteReg(NFI_CNFG_REG16
, CNFG_OP_RESET
);
2016 mtk_nand_set_command(NAND_CMD_RESET
);
2018 while(!(DRV_Reg32(NFI_STA_REG32
) & STA_NAND_BUSY_RETURN
) && (timeout
--));
2025 //-------------------------------------------------------------------------------
2027 /******************************************************************************
2028 * mtk_nand_check_RW_count
2031 * Check the RW how many sectors !
2042 ******************************************************************************/
/* Wait until the NFI address counter shows that u2WriteSize bytes (in
 * sector units) have been transferred; logs on timeout.
 * NOTE(review): the timeout decrement and return statements were lost in
 * this extract. */
2043 static bool mtk_nand_check_RW_count(u16 u2WriteSize
)
2045 u32 timeout
= 0xFFFF;
2046 u16 u2SecNum
= u2WriteSize
>> host
->hw
->nand_sec_shift
;
2048 while (ADDRCNTR_CNTR(DRV_Reg32(NFI_ADDRCNTR_REG16
)) < u2SecNum
)
2053 printk(KERN_INFO
"[%s] timeout\n", __FUNCTION__
);
2060 /******************************************************************************
2061 * mtk_nand_ready_for_read
2064 * Prepare hardware environment for read !
2067 * struct nand_chip *nand, u32 u4RowAddr
2075 ******************************************************************************/
/* Prepare the controller for a page read: reset the pad macro if a
 * sync/toggle interface is active, reset NFI, configure read mode, ECC,
 * sector count and (in AHB mode) the DMA start address, then issue
 * READ0 / address / READSTART and wait for the device to go busy.
 * NOTE(review): braces, else branches, the error returns and the final
 * "return true" were lost in this extract. */
2076 static bool mtk_nand_ready_for_read(struct nand_chip
*nand
, u32 u4RowAddr
, u32 u4ColAddr
, u16 sec_num
, bool full
, u8
* buf
)
2078 /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
2080 //u16 sec_num = 1 << (nand->page_shift - host->hw->nand_sec_shift);
2081 u32 col_addr
= u4ColAddr
;
2082 u32 colnob
= 2, rownob
= devinfo
.addr_cycle
- 2;
2083 //u32 reg_val = DRV_Reg32(NFI_MASTERRST_REG32);
2084 #if __INTERNAL_USE_AHB_MODE__
2087 #if CFG_PERFLOG_DEBUG
2088 struct timeval stimer
,etimer
;
2089 do_gettimeofday(&stimer
);
// Non-legacy (ONFI/toggle) interface: pulse the pad-macro reset.
2091 if(DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32
)&0x3)
2093 NFI_SET_REG16(NFI_MASTERRST_REG32
, PAD_MACRO_RST
);//reset
2094 NFI_CLN_REG16(NFI_MASTERRST_REG32
, PAD_MACRO_RST
);//dereset
2097 if (nand
->options
& NAND_BUSWIDTH_16
)
2100 if (!mtk_nand_reset())
2107 NFI_SET_REG32(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
2110 NFI_CLN_REG32(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
2113 mtk_nand_set_mode(CNFG_OP_READ
);
2114 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_READ_EN
);
2115 DRV_WriteReg32(NFI_CON_REG16
, sec_num
<< CON_NFI_SEC_SHIFT
);
// AHB/DMA path: translate the buffer to a physical address for the engine.
2119 #if __INTERNAL_USE_AHB_MODE__
2120 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
2121 phys
= nand_virt_to_phys_add((u32
) buf
);
2124 printk(KERN_ERR
"[mtk_nand_ready_for_read]convert virt addr (%x) to phys add (%x)fail!!!", (u32
) buf
, phys
);
2128 DRV_WriteReg32(NFI_STRADDR_REG32
, phys
);
2131 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
2136 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
2139 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
2144 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
2145 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
2148 mtk_nand_set_autoformat(full
);
// Command/address sequence: READ0, column+row address, READSTART, busy-wait.
2156 if (!mtk_nand_set_command(NAND_CMD_READ0
))
2160 if (!mtk_nand_set_address(col_addr
, u4RowAddr
, colnob
, rownob
))
2165 if (!mtk_nand_set_command(NAND_CMD_READSTART
))
2170 if (!mtk_nand_status_ready(STA_NAND_BUSY
))
2178 #if CFG_PERFLOG_DEBUG
2179 do_gettimeofday(&etimer
);
2180 g_NandPerfLog
.ReadBusyTotalTime
+= Cal_timediff(&etimer
,&stimer
);
2181 g_NandPerfLog
.ReadBusyCount
++;
2186 /******************************************************************************
2187 * mtk_nand_ready_for_write
2190 * Prepare hardware environment for write !
2193 * struct nand_chip *nand, u32 u4RowAddr
2201 ******************************************************************************/
/* Prepare the controller for a page program: reset NFI, select program
 * mode, set the sector count, program the DMA start address in AHB mode
 * (with a debug check against a forbidden physical window), configure ECC
 * and autoformat, then issue SEQIN + address and wait for ready.
 * NOTE(review): braces, else branches and the return statements were lost
 * in this extract. */
2202 static bool mtk_nand_ready_for_write(struct nand_chip
*nand
, u32 u4RowAddr
, u32 col_addr
, bool full
, u8
* buf
)
2205 u32 sec_num
= 1 << (nand
->page_shift
- host
->hw
->nand_sec_shift
);
2206 u32 colnob
= 2, rownob
= devinfo
.addr_cycle
- 2;
2207 #if __INTERNAL_USE_AHB_MODE__
2211 if (nand
->options
& NAND_BUSWIDTH_16
)
2214 /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
2215 if (!mtk_nand_reset())
2217 printk("[Bean]mtk_nand_ready_for_write (mtk_nand_reset) fail!\n");
2221 mtk_nand_set_mode(CNFG_OP_PRGM
);
2223 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_READ_EN
);
2225 DRV_WriteReg32(NFI_CON_REG16
, sec_num
<< CON_NFI_SEC_SHIFT
);
// AHB/DMA path: program the physical start address of the source buffer.
2229 #if __INTERNAL_USE_AHB_MODE__
2230 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
2231 phys
= nand_virt_to_phys_add((u32
) buf
);
2232 //T_phys=__virt_to_phys(buf);
2235 printk(KERN_ERR
"[mt65xx_nand_ready_for_write]convert virt addr (%x) to phys add fail!!!", (u32
) buf
);
2239 DRV_WriteReg32(NFI_STRADDR_REG32
, phys
);
// Debug guard: flag DMA sources falling in a known-bad physical window.
2242 if ((T_phys
> 0x700000 && T_phys
< 0x800000) || (phys
> 0x700000 && phys
< 0x800000))
2245 printk("[NFI_WRITE]ERROR: Forbidden AHB address wrong phys address =0x%x , right phys address=0x%x, virt address= 0x%x (count = %d)\n", T_phys
, phys
, (u32
) buf
, g_dump_count
++);
2246 show_stack(NULL
, NULL
);
2252 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
2256 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
2259 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
2263 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
2264 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
2267 mtk_nand_set_autoformat(full
);
2277 if (!mtk_nand_set_command(NAND_CMD_SEQIN
))
2279 printk("[Bean]mtk_nand_ready_for_write (mtk_nand_set_command) fail!\n");
2282 //1 FIXED ME: For Any Kind of AddrCycle
2283 if (!mtk_nand_set_address(col_addr
, u4RowAddr
, colnob
, rownob
))
2285 printk("[Bean]mtk_nand_ready_for_write (mtk_nand_set_address) fail!\n");
2289 if (!mtk_nand_status_ready(STA_NAND_BUSY
))
2291 printk("[Bean]mtk_nand_ready_for_write (mtk_nand_status_ready) fail!\n");
/* Wait (with a wall-clock budget) for the BCH decoder to finish the last
 * sector: poll the DECDONE bit for sector u4SecNum-1, then wait for the
 * decoder FSM to return to idle. Logs and bails out on timeout.
 * NOTE(review): braces and the return statements were lost in this extract. */
2301 static bool mtk_nand_check_dececc_done(u32 u4SecNum
)
2304 struct timeval timer_timeout
, timer_cur
;
2305 do_gettimeofday(&timer_timeout
);
2307 timer_timeout
.tv_usec
+= 800 * 1000; // 800 ms budget (previous comment incorrectly said 500ms)
2308 if (timer_timeout
.tv_usec
>= 1000000) // 1 second
2310 timer_timeout
.tv_usec
-= 1000000;
2311 timer_timeout
.tv_sec
+= 1;
// Done bit for the last sector of the transfer.
2314 dec_mask
= (1 << (u4SecNum
- 1));
2315 while (dec_mask
!= (DRV_Reg(ECC_DECDONE_REG16
) & dec_mask
))
2317 do_gettimeofday(&timer_cur
);
2318 if (timeval_compare(&timer_cur
, &timer_timeout
) >= 0)
2320 MSG(INIT
, "ECC_DECDONE: timeout 0x%x %d\n",DRV_Reg(ECC_DECDONE_REG16
),u4SecNum
);
// DECDONE alone is not enough; also wait for the decode FSM to idle.
2325 while (DRV_Reg32(ECC_DECFSM_REG32
) != ECC_DECFSM_IDLE
)
2327 do_gettimeofday(&timer_cur
);
2328 if (timeval_compare(&timer_cur
, &timer_timeout
) >= 0)
2330 MSG(INIT
, "ECC_DECDONE: timeout 0x%x %d\n",DRV_Reg(ECC_DECDONE_REG16
),u4SecNum
);
2338 /******************************************************************************
2339 * mtk_nand_read_page_data
2342 * Fill the page data into buffer !
2345 * u8* pDataBuf, u32 u4Size
2353 ******************************************************************************/
/* DMA a page read from the controller into buf: map the buffer for
 * DMA-from-device, enable burst mode when 16-byte aligned, arm the AHB-done
 * interrupt, start the burst read, then wait (interrupt or polling) until
 * BYTELEN shows all sectors transferred; finally unmap the buffer.
 * NOTE(review): braces, else branches, timeout decrements and the final
 * return were lost in this extract. */
2354 static bool mtk_nand_dma_read_data(struct mtd_info
*mtd
, u8
* buf
, u32 length
)
2356 int interrupt_en
= g_i4Interrupt
;
2357 int timeout
= 0xfffff;
2358 struct scatterlist sg
;
2359 enum dma_data_direction dir
= DMA_FROM_DEVICE
;
2360 #if CFG_PERFLOG_DEBUG
2361 struct timeval stimer
,etimer
;
2362 do_gettimeofday(&stimer
);
2364 sg_init_one(&sg
, buf
, length
);
2365 dma_map_sg(&(mtd
->dev
), &sg
, 1, dir
);
2367 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_BYTE_RW
);
2368 // DRV_WriteReg32(NFI_STRADDR_REG32, __virt_to_phys(pDataBuf));
// Burst DMA requires 16-byte alignment; fall back to single-beat otherwise.
2370 if ((unsigned int)buf
% 16) // TODO: can not use AHB mode here
2372 printk(KERN_INFO
"Un-16-aligned address\n");
2373 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_DMA_BURST_EN
);
2376 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_DMA_BURST_EN
);
// Read-to-clear the interrupt status, then enable the AHB-done interrupt.
2379 DRV_Reg16(NFI_INTR_REG16
);
2380 DRV_WriteReg16(NFI_INTR_EN_REG16
, INTR_AHB_DONE_EN
);
2384 init_completion(&g_comp_AHB_Done
);
2386 //dmac_inv_range(pDataBuf, pDataBuf + u4Size);
2388 NFI_SET_REG32(NFI_CON_REG16
, CON_NFI_BRD
);
2393 // Wait up to 50 jiffies for AHB done (previous comment said 10ms)
2394 if (!wait_for_completion_timeout(&g_comp_AHB_Done
, 50))
2396 MSG(INIT
, "wait for completion timeout happened @ [%s]: %d\n", __FUNCTION__
, __LINE__
);
// Verify all sectors really arrived by polling the byte-length counter.
2402 while ((length
>> host
->hw
->nand_sec_shift
) > ((DRV_Reg32(NFI_BYTELEN_REG16
) & 0x1f000) >> 12))
2407 printk(KERN_ERR
"[%s] poll BYTELEN error\n", __FUNCTION__
);
2409 return false; //4 // AHB Mode Time Out!
// Polling path (interrupts disabled): wait on the raw interrupt status.
2414 while (!DRV_Reg16(NFI_INTR_REG16
))
2419 printk(KERN_ERR
"[%s] poll nfi_intr error\n", __FUNCTION__
);
2422 return false; //4 // AHB Mode Time Out!
2426 while ((length
>> host
->hw
->nand_sec_shift
) > ((DRV_Reg32(NFI_BYTELEN_REG16
) & 0x1f000) >> 12))
2431 printk(KERN_ERR
"[%s] poll BYTELEN error\n", __FUNCTION__
);
2434 return false; //4 // AHB Mode Time Out!
2439 dma_unmap_sg(&(mtd
->dev
), &sg
, 1, dir
);
2440 #if CFG_PERFLOG_DEBUG
2441 do_gettimeofday(&etimer
);
2442 g_NandPerfLog
.ReadDMATotalTime
+= Cal_timediff(&etimer
,&stimer
);
2443 g_NandPerfLog
.ReadDMACount
++;
/* PIO (MCU) read path: select byte-wise or word-wise FIFO access depending
 * on buffer/length alignment, start the burst read, then drain the data
 * register one byte or one u32 at a time, gated on the PIO data-ready bit,
 * with a bounded timeout. Records the elapsed sched_clock() time.
 * NOTE(review): braces, timeout decrements and return statements were lost
 * in this extract. */
2448 static bool mtk_nand_mcu_read_data(u8
* buf
, u32 length
)
2450 int timeout
= 0xffff;
2452 u32
*buf32
= (u32
*) buf
;
2454 unsigned long long time1
, time2
;
2455 time1
= sched_clock();
// Unaligned buffer or length: fall back to byte-wide FIFO access.
2457 if ((u32
) buf
% 4 || length
% 4)
2458 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_BYTE_RW
);
2460 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_BYTE_RW
);
2462 //DRV_WriteReg32(NFI_STRADDR_REG32, 0);
2464 NFI_SET_REG32(NFI_CON_REG16
, CON_NFI_BRD
);
2466 if ((u32
) buf
% 4 || length
% 4)
2468 for (i
= 0; (i
< (length
)) && (timeout
> 0);)
2470 //if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4)
2471 if (DRV_Reg16(NFI_PIO_DIRDY_REG16
) & 1)
2473 *buf
++ = (u8
) DRV_Reg32(NFI_DATAR_REG32
);
2481 printk(KERN_ERR
"[%s] timeout\n", __FUNCTION__
);
// Aligned path: read a full u32 per data-register access.
2488 for (i
= 0; (i
< (length
>> 2)) && (timeout
> 0);)
2490 //if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4)
2491 if (DRV_Reg16(NFI_PIO_DIRDY_REG16
) & 1)
2493 *buf32
++ = DRV_Reg32(NFI_DATAR_REG32
);
2501 printk(KERN_ERR
"[%s] timeout\n", __FUNCTION__
);
2508 time2
= sched_clock() - time1
;
2511 readdatatime
= (time2
);
2517 static bool mtk_nand_read_page_data(struct mtd_info
*mtd
, u8
* pDataBuf
, u32 u4Size
)
2519 #if (__INTERNAL_USE_AHB_MODE__)
2520 return mtk_nand_dma_read_data(mtd
, pDataBuf
, u4Size
);
2522 return mtk_nand_mcu_read_data(mtd
, pDataBuf
, u4Size
);
2526 /******************************************************************************
2527 * mtk_nand_write_page_data
2530 * Fill the page data into buffer !
2533 * u8* pDataBuf, u32 u4Size
2541 ******************************************************************************/
/* DMA a page program from pDataBuf to the controller: map the buffer for
 * DMA-to-device, pick burst mode by 16-byte alignment, arm (or not) the
 * AHB-done interrupt, start the burst write, then wait via completion or by
 * polling BYTELEN; finally unmap the buffer.
 * NOTE(review): braces, else branches, timeout decrements and the final
 * return were lost in this extract. Note i4Interrupt is forced to 0, so the
 * polling path is the one normally taken. */
2542 static bool mtk_nand_dma_write_data(struct mtd_info
*mtd
, u8
* pDataBuf
, u32 u4Size
)
2544 int i4Interrupt
= 0; //g_i4Interrupt;
2545 u32 timeout
= 0xFFFF;
2546 struct scatterlist sg
;
2547 enum dma_data_direction dir
= DMA_TO_DEVICE
;
2548 #if CFG_PERFLOG_DEBUG
2549 struct timeval stimer
,etimer
;
2550 do_gettimeofday(&stimer
);
2552 sg_init_one(&sg
, pDataBuf
, u4Size
);
2553 dma_map_sg(&(mtd
->dev
), &sg
, 1, dir
);
2555 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_BYTE_RW
);
// Clear pending interrupt status and mask interrupts before setup.
2556 DRV_Reg16(NFI_INTR_REG16
);
2557 DRV_WriteReg16(NFI_INTR_EN_REG16
, 0);
2558 // DRV_WriteReg32(NFI_STRADDR_REG32, (u32*)virt_to_phys(pDataBuf));
2560 if ((unsigned int)pDataBuf
% 16) // TODO: can not use AHB mode here
2562 printk(KERN_INFO
"Un-16-aligned address\n");
2563 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_DMA_BURST_EN
);
2566 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_DMA_BURST_EN
);
2571 init_completion(&g_comp_AHB_Done
);
2572 DRV_Reg16(NFI_INTR_REG16
);
2573 DRV_WriteReg16(NFI_INTR_EN_REG16
, INTR_AHB_DONE_EN
);
2575 //dmac_clean_range(pDataBuf, pDataBuf + u4Size);
2577 NFI_SET_REG32(NFI_CON_REG16
, CON_NFI_BWR
);
2581 // Wait up to 10 jiffies for AHB done
2582 if (!wait_for_completion_timeout(&g_comp_AHB_Done
, 10))
2584 MSG(READ
, "wait for completion timeout happened @ [%s]: %d\n", __FUNCTION__
, __LINE__
);
2590 // wait_for_completion(&g_comp_AHB_Done);
// Polling path: wait until BYTELEN reports all sectors written.
2593 while ((u4Size
>> host
->hw
->nand_sec_shift
) > ((DRV_Reg32(NFI_BYTELEN_REG16
) & 0x1f000) >> 12))
2598 printk(KERN_ERR
"[%s] poll BYTELEN error\n", __FUNCTION__
);
2600 return false; //4 // AHB Mode Time Out!
2606 dma_unmap_sg(&(mtd
->dev
), &sg
, 1, dir
);
2607 #if CFG_PERFLOG_DEBUG
2608 do_gettimeofday(&etimer
);
2609 g_NandPerfLog
.WriteDMATotalTime
+= Cal_timediff(&etimer
,&stimer
);
2610 g_NandPerfLog
.WriteDMACount
++;
/* PIO (MCU) program path: start the burst write, choose byte-wise or
 * word-wise FIFO access by alignment, then feed the data register one byte
 * or one u32 at a time, gated on the PIO data-ready bit, with a bounded
 * timeout.
 * NOTE(review): braces, timeout decrements and return statements were lost
 * in this extract. */
2615 static bool mtk_nand_mcu_write_data(struct mtd_info
*mtd
, const u8
* buf
, u32 length
)
2617 u32 timeout
= 0xFFFF;
2620 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_BYTE_RW
);
2622 NFI_SET_REG32(NFI_CON_REG16
, CON_NFI_BWR
);
2623 pBuf32
= (u32
*) buf
;
// Unaligned buffer or length: fall back to byte-wide FIFO access.
2625 if ((u32
) buf
% 4 || length
% 4)
2626 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_BYTE_RW
);
2628 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_BYTE_RW
);
2630 if ((u32
) buf
% 4 || length
% 4)
2632 for (i
= 0; (i
< (length
)) && (timeout
> 0);)
2634 if (DRV_Reg16(NFI_PIO_DIRDY_REG16
) & 1)
2636 DRV_WriteReg32(NFI_DATAW_REG32
, *buf
++);
2644 printk(KERN_ERR
"[%s] timeout\n", __FUNCTION__
);
// Aligned path: write a full u32 per data-register access.
2651 for (i
= 0; (i
< (length
>> 2)) && (timeout
> 0);)
2653 // if (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) <= 12)
2654 if (DRV_Reg16(NFI_PIO_DIRDY_REG16
) & 1)
2656 DRV_WriteReg32(NFI_DATAW_REG32
, *pBuf32
++);
2664 printk(KERN_ERR
"[%s] timeout\n", __FUNCTION__
);
2674 static bool mtk_nand_write_page_data(struct mtd_info
*mtd
, u8
* buf
, u32 size
)
2676 #if (__INTERNAL_USE_AHB_MODE__)
2677 return mtk_nand_dma_write_data(mtd
, buf
, size
);
2679 return mtk_nand_mcu_write_data(mtd
, buf
, size
);
2683 /******************************************************************************
2684 * mtk_nand_read_fdm_data
2690 * u8* pDataBuf, u32 u4SecNum
2698 ******************************************************************************/
2699 static void mtk_nand_read_fdm_data(u8
* pDataBuf
, u32 u4SecNum
)
2702 u32
*pBuf32
= (u32
*) pDataBuf
;
2706 for (i
= 0; i
< u4SecNum
; ++i
)
2708 *pBuf32
++ = DRV_Reg32(NFI_FDM0L_REG32
+ (i
<< 1));
2709 *pBuf32
++ = DRV_Reg32(NFI_FDM0M_REG32
+ (i
<< 1));
2710 //*pBuf32++ = DRV_Reg32((u32)NFI_FDM0L_REG32 + (i<<3));
2711 //*pBuf32++ = DRV_Reg32((u32)NFI_FDM0M_REG32 + (i<<3));
2716 /******************************************************************************
2717 * mtk_nand_write_fdm_data
2720 * Write a fdm data !
2723 * u8* pDataBuf, u32 u4SecNum
2731 ******************************************************************************/
/* Staging buffer for FDM bytes: 8 bytes per sector, up to 16 sectors. */
2732 static u8 fdm_buf
[128];
/* Load the per-sector FDM (spare/OOB) bytes into the controller before a
 * program: copy caller data into fdm_buf, XOR a checksum over the non-0xFF
 * bytes of the oobfree regions and store it after the last free region,
 * then write the staged words to the NFI_FDM0L/NFI_FDM0M register pairs.
 * NOTE(review): loop braces and the checksum/pBuf32/i/j declarations were
 * lost in this extract. */
2733 static void mtk_nand_write_fdm_data(struct nand_chip
*chip
, u8
* pDataBuf
, u32 u4SecNum
)
2738 struct nand_oobfree
*free_entry
;
2741 memcpy(fdm_buf
, pDataBuf
, u4SecNum
* 8);
2743 free_entry
= chip
->ecc
.layout
->oobfree
;
// Walk every non-empty oobfree region of the ECC layout.
2744 for (i
= 0; i
< MTD_MAX_OOBFREE_ENTRIES
&& free_entry
[i
].length
; i
++)
2746 for (j
= 0; j
< free_entry
[i
].length
; j
++)
2748 if (pDataBuf
[free_entry
[i
].offset
+ j
] != 0xFF)
2750 checksum
^= pDataBuf
[free_entry
[i
].offset
+ j
];
// Store the checksum immediately after the last free region.
2756 fdm_buf
[free_entry
[i
- 1].offset
+ free_entry
[i
- 1].length
] = checksum
;
2759 pBuf32
= (u32
*) fdm_buf
;
2760 for (i
= 0; i
< u4SecNum
; ++i
)
2762 DRV_WriteReg32(NFI_FDM0L_REG32
+ (i
<< 1), *pBuf32
++);
2763 DRV_WriteReg32(NFI_FDM0M_REG32
+ (i
<< 1), *pBuf32
++);
2764 //DRV_WriteReg32((u32)NFI_FDM0L_REG32 + (i<<3), *pBuf32++);
2765 //DRV_WriteReg32((u32)NFI_FDM0M_REG32 + (i<<3), *pBuf32++);
2769 /******************************************************************************
2770 * mtk_nand_stop_read
2773 * Stop read operation !
2784 ******************************************************************************/
2785 static void mtk_nand_stop_read(void)
2787 NFI_CLN_REG32(NFI_CON_REG16
, CON_NFI_BRD
);
2793 DRV_WriteReg16(NFI_INTR_EN_REG16
, 0);
2796 /******************************************************************************
2797 * mtk_nand_stop_write
2800 * Stop write operation !
2811 ******************************************************************************/
2812 static void mtk_nand_stop_write(void)
2814 NFI_CLN_REG32(NFI_CON_REG16
, CON_NFI_BWR
);
2819 DRV_WriteReg16(NFI_INTR_EN_REG16
, 0);
2822 //---------------------------------------------------------------------------
2823 #define STATUS_READY (0x40)
2824 #define STATUS_FAIL (0x01)
2825 #define STATUS_WR_ALLOW (0x80)
2827 static bool mtk_nand_read_status(void)
2829 int status
= 0;//, i;
2830 unsigned int timeout
;
2834 /* Disable HW ECC */
2835 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
2837 /* Disable 16-bit I/O */
2838 NFI_CLN_REG16(NFI_PAGEFMT_REG16
, PAGEFMT_DBYTE_EN
);
2839 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_OP_SRD
| CNFG_READ_EN
| CNFG_BYTE_RW
);
2841 DRV_WriteReg32(NFI_CON_REG16
, CON_NFI_SRD
| (1 << CON_NFI_NOB_SHIFT
));
2843 DRV_WriteReg32(NFI_CON_REG16
, 0x3);
2844 mtk_nand_set_mode(CNFG_OP_SRD
);
2845 DRV_WriteReg16(NFI_CNFG_REG16
, 0x2042);
2846 mtk_nand_set_command(NAND_CMD_STATUS
);
2847 DRV_WriteReg32(NFI_CON_REG16
, 0x90);
2849 timeout
= TIMEOUT_4
;
2850 WAIT_NFI_PIO_READY(timeout
);
2854 status
= (DRV_Reg16(NFI_DATAR_REG32
));
2857 DRV_WriteReg32(NFI_CON_REG16
, 0);
2859 if (devinfo
.iowidth
== 16)
2861 NFI_SET_REG16(NFI_PAGEFMT_REG16
, PAGEFMT_DBYTE_EN
);
2862 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_BYTE_RW
);
2864 // check READY/BUSY status first
2865 if (!(STATUS_READY
& status
))
2867 //MSG(ERR, "status is not ready\n");
2869 // flash is ready now, check status code
2870 if (STATUS_FAIL
& status
)
2872 if (!(STATUS_WR_ALLOW
& status
))
2874 //MSG(INIT, "status locked\n");
2878 //MSG(INIT, "status unknown\n");
2887 bool mtk_nand_SetFeature(struct mtd_info
*mtd
, u16 cmd
, u32 addr
, u8
*value
, u8 bytes
)
2892 u32 timeout
=TIMEOUT_3
;//0xffff;
2894 // struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2898 reg
= DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32
);
2899 if (!(reg
&TYPE_SLC
))
2902 reg_val
|= (CNFG_OP_CUST
| CNFG_BYTE_RW
);
2903 DRV_WriteReg(NFI_CNFG_REG16
, reg_val
);
2905 mtk_nand_set_command(cmd
);
2906 mtk_nand_set_address(addr
, 0, 1, 0);
2908 mtk_nand_status_ready(STA_NFI_OP_MASK
);
2910 DRV_WriteReg32(NFI_CON_REG16
, 1 << CON_NFI_SEC_SHIFT
);
2911 NFI_SET_REG32(NFI_CON_REG16
, CON_NFI_BWR
);
2912 DRV_WriteReg(NFI_STRDATA_REG16
, 0x1);
2913 //printk("Bytes=%d\n", bytes);
2914 while ( (write_count
< bytes
) && timeout
)
2916 WAIT_NFI_PIO_READY(timeout
)
2923 //printk("VALUE1:0x%2X\n", *value);
2924 DRV_WriteReg8(NFI_DATAW_REG32
, *value
++);
2925 }else if(write_count
% 2)
2927 //printk("VALUE2:0x%2X\n", *value);
2928 DRV_WriteReg8(NFI_DATAW_REG32
, *value
++);
2932 //printk("VALUE3:0x%2X\n", *value);
2933 DRV_WriteReg8(NFI_DATAW_REG32
, *value
);
2936 timeout
= TIMEOUT_3
;
2938 *NFI_CNRNB_REG16
= 0x81;
2939 if (!mtk_nand_status_ready(STA_NAND_BUSY_RETURN
))
2944 //mtk_nand_read_status();
2950 bool mtk_nand_GetFeature(struct mtd_info
*mtd
, u16 cmd
, u32 addr
, u8
*value
, u8 bytes
)
2954 u32 timeout
=TIMEOUT_3
;//0xffff;
2955 // struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2959 reg_val
|= (CNFG_OP_CUST
| CNFG_BYTE_RW
| CNFG_READ_EN
);
2960 DRV_WriteReg(NFI_CNFG_REG16
, reg_val
);
2962 mtk_nand_set_command(cmd
);
2963 mtk_nand_set_address(addr
, 0, 1, 0);
2964 mtk_nand_status_ready(STA_NFI_OP_MASK
);
2965 *NFI_CNRNB_REG16
= 0x81;
2966 mtk_nand_status_ready(STA_NAND_BUSY_RETURN
);
2968 //DRV_WriteReg32(NFI_CON_REG16, 0 << CON_NFI_SEC_SHIFT);
2969 reg_val
= DRV_Reg32(NFI_CON_REG16
);
2970 reg_val
&= ~CON_NFI_NOB_MASK
;
2971 reg_val
|= ((4 << CON_NFI_NOB_SHIFT
)|CON_NFI_SRD
);
2972 DRV_WriteReg32(NFI_CON_REG16
, reg_val
);
2973 DRV_WriteReg(NFI_STRDATA_REG16
, 0x1);
2975 while ( (read_count
< bytes
) && timeout
)
2977 WAIT_NFI_PIO_READY(timeout
)
2982 *value
++ = DRV_Reg8(NFI_DATAR_REG32
);
2983 //printk("Value[0x%02X]\n", DRV_Reg8(NFI_DATAR_REG32));
2985 timeout
= TIMEOUT_3
;
2987 // chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
2988 //mtk_nand_read_status();
2997 const u8 data_tbl
[8][5] =
2999 {0x04, 0x04, 0x7C, 0x7E, 0x00},
3000 {0x00, 0x7C, 0x78, 0x78, 0x00},
3001 {0x7C, 0x76, 0x74, 0x72, 0x00},
3002 {0x08, 0x08, 0x00, 0x00, 0x00},
3003 {0x0B, 0x7E, 0x76, 0x74, 0x00},
3004 {0x10, 0x76, 0x72, 0x70, 0x00},
3005 {0x02, 0x7C, 0x7E, 0x70, 0x00},
3006 {0x00, 0x00, 0x00, 0x00, 0x00}
3009 static void mtk_nand_modeentry_rrtry(void)
3013 mtk_nand_set_mode(CNFG_OP_CUST
);
3015 mtk_nand_set_command(0x5C);
3016 mtk_nand_set_command(0xC5);
3018 mtk_nand_status_ready(STA_NFI_OP_MASK
);
3021 static void mtk_nand_rren_rrtry(bool needB3
)
3025 mtk_nand_set_mode(CNFG_OP_CUST
);
3028 mtk_nand_set_command(0xB3);
3029 mtk_nand_set_command(0x26);
3030 mtk_nand_set_command(0x5D);
3032 mtk_nand_status_ready(STA_NFI_OP_MASK
);
3035 static void mtk_nand_sprmset_rrtry(u32 addr
, u32 data
) //single parameter setting
3040 u32 timeout
=TIMEOUT_3
;//0xffff;
3044 reg_val
|= (CNFG_OP_CUST
| CNFG_BYTE_RW
);
3045 DRV_WriteReg(NFI_CNFG_REG16
, reg_val
);
3046 mtk_nand_set_command(0x55);
3047 mtk_nand_set_address(addr
, 0, 1, 0);
3048 mtk_nand_status_ready(STA_NFI_OP_MASK
);
3049 DRV_WriteReg32(NFI_CON_REG16
, 1 << CON_NFI_SEC_SHIFT
);
3050 NFI_SET_REG32(NFI_CON_REG16
, CON_NFI_BWR
);
3051 DRV_WriteReg(NFI_STRDATA_REG16
, 0x1);
3052 WAIT_NFI_PIO_READY(timeout
);
3054 DRV_WriteReg8(NFI_DATAW_REG32
, data
);
3056 while(!(DRV_Reg32(NFI_STA_REG32
) & STA_NAND_BUSY_RETURN
) && (timeout
--));
3059 static void mtk_nand_toshiba_rrtry(struct mtd_info
*mtd
,flashdev_info_t deviceinfo
, u32 retryCount
, bool defValue
)
3063 u8 add_reg
[6] = {0x04, 0x05, 0x06, 0x07, 0x0D};
3065 acccon
= DRV_Reg32(NFI_ACCCON_REG32
);
3066 DRV_WriteReg32(NFI_ACCCON_REG32
, 0x31C08669); //to fit read retry timing
3069 mtk_nand_modeentry_rrtry();
3071 for(cnt
= 0; cnt
< 5; cnt
++)
3073 mtk_nand_sprmset_rrtry(add_reg
[cnt
], data_tbl
[retryCount
][cnt
]);
3077 mtk_nand_rren_rrtry(TRUE
);
3078 else if(6 > retryCount
)
3079 mtk_nand_rren_rrtry(FALSE
);
3081 if(7 == retryCount
) // to exit
3083 mtk_nand_device_reset();
3085 //should do NAND DEVICE interface change under sync mode
3088 DRV_WriteReg32(NFI_ACCCON_REG32
, acccon
);
3092 static void mtk_nand_micron_rrtry(struct mtd_info
*mtd
,flashdev_info_t deviceinfo
, u32 feature
, bool defValue
)
3094 //u32 feature = deviceinfo.feature_set.FeatureSet.readRetryStart+retryCount;
3095 mtk_nand_SetFeature(mtd
, deviceinfo
.feature_set
.FeatureSet
.sfeatureCmd
,\
3096 deviceinfo
.feature_set
.FeatureSet
.readRetryAddress
,\
3100 static int g_sandisk_retry_case
= 0; //for new read retry table case 1,2,3,4
3101 static void mtk_nand_sandisk_rrtry(struct mtd_info
*mtd
,flashdev_info_t deviceinfo
, u32 feature
, bool defValue
)
3103 //u32 feature = deviceinfo.feature_set.FeatureSet.readRetryStart+retryCount;
3104 if(FALSE
== defValue
)
3110 mtk_nand_device_reset();
3112 //should do NAND DEVICE interface change under sync mode
3115 mtk_nand_SetFeature(mtd
, deviceinfo
.feature_set
.FeatureSet
.sfeatureCmd
,\
3116 deviceinfo
.feature_set
.FeatureSet
.readRetryAddress
,\
3118 if(FALSE
== defValue
)
3120 if(g_sandisk_retry_case
> 1) //case 3
3122 if(g_sandisk_retry_case
== 3)
3124 u32 timeout
=TIMEOUT_3
;
3126 DRV_WriteReg(NFI_CNFG_REG16
, (CNFG_OP_CUST
| CNFG_BYTE_RW
));
3127 mtk_nand_set_command(0x5C);
3128 mtk_nand_set_command(0xC5);
3129 mtk_nand_set_command(0x55);
3130 mtk_nand_set_address(0x00, 0, 1, 0); // test mode entry
3131 mtk_nand_status_ready(STA_NFI_OP_MASK
);
3132 DRV_WriteReg32(NFI_CON_REG16
, 1 << CON_NFI_SEC_SHIFT
);
3133 NFI_SET_REG32(NFI_CON_REG16
, CON_NFI_BWR
);
3134 DRV_WriteReg(NFI_STRDATA_REG16
, 0x1);
3135 WAIT_NFI_PIO_READY(timeout
);
3136 DRV_WriteReg8(NFI_DATAW_REG32
, 0x01);
3137 while(!(DRV_Reg32(NFI_STA_REG32
) & STA_NAND_BUSY_RETURN
) && (timeout
--));
3140 mtk_nand_set_command(0x55);
3141 mtk_nand_set_address(0x23, 0, 1, 0); //changing parameter LMFLGFIX_NEXT = 1 to all die
3142 mtk_nand_status_ready(STA_NFI_OP_MASK
);
3143 DRV_WriteReg32(NFI_CON_REG16
, 1 << CON_NFI_SEC_SHIFT
);
3144 NFI_SET_REG32(NFI_CON_REG16
, CON_NFI_BWR
);
3145 DRV_WriteReg(NFI_STRDATA_REG16
, 0x1);
3146 WAIT_NFI_PIO_READY(timeout
);
3147 DRV_WriteReg8(NFI_DATAW_REG32
, 0xC0);
3148 while(!(DRV_Reg32(NFI_STA_REG32
) & STA_NAND_BUSY_RETURN
) && (timeout
--));
3150 printk("Case3# Set LMFLGFIX_NEXT=1\n");
3152 mtk_nand_set_command(0x25);
3153 printk("Case2#3# Set cmd 25\n");
3155 mtk_nand_set_command(deviceinfo
.feature_set
.FeatureSet
.readRetryPreCmd
);
3159 //sandisk 19nm read retry
3160 u16 sandisk_19nm_rr_table
[18] =
3163 0xFF0F, 0xEEFE, 0xDDFD, 0x11EE, //04h[7:4] | 07h[7:4] | 04h[3:0] | 05h[7:4]
3164 0x22ED, 0x33DF, 0xCDDE, 0x01DD,
3165 0x0211, 0x1222, 0xBD21, 0xAD32,
3166 0x9DF0, 0xBCEF, 0xACDC, 0x9CFF,
3170 static void sandisk_19nm_rr_init(void)
3174 u32 timeout
= 0xffff;
3175 u32 u4RandomSetting
;
3178 acccon
= DRV_Reg32(NFI_ACCCON_REG32
);
3179 DRV_WriteReg32(NFI_ACCCON_REG32
, 0x31C08669); //to fit read retry timing
3183 reg_val
= (CNFG_OP_CUST
| CNFG_BYTE_RW
);
3184 DRV_WriteReg(NFI_CNFG_REG16
, reg_val
);
3185 mtk_nand_set_command(0x3B);
3186 mtk_nand_set_command(0xB9);
3188 for(count
= 0; count
< 9; count
++)
3190 mtk_nand_set_command(0x53);
3191 mtk_nand_set_address((0x04 + count
), 0, 1, 0);
3192 DRV_WriteReg(NFI_CON_REG16
, (CON_NFI_BWR
| (1 << CON_NFI_SEC_SHIFT
)));
3193 DRV_WriteReg(NFI_STRDATA_REG16
, 1);
3195 WAIT_NFI_PIO_READY(timeout
);
3196 DRV_WriteReg32(NFI_DATAW_REG32
, 0x00);
3200 DRV_WriteReg32(NFI_ACCCON_REG32
, acccon
);
3203 static void sandisk_19nm_rr_loading(u32 retryCount
, bool defValue
)
3206 u32 timeout
= 0xffff;
3209 u8 cmd_reg
[4] = {0x4, 0x5, 0x7};
3210 acccon
= DRV_Reg32(NFI_ACCCON_REG32
);
3211 DRV_WriteReg32(NFI_ACCCON_REG32
, 0x31C08669); //to fit read retry timing
3215 reg_val
= (CNFG_OP_CUST
| CNFG_BYTE_RW
);
3216 DRV_WriteReg(NFI_CNFG_REG16
, reg_val
);
3218 if((0 != retryCount
) || defValue
)
3220 mtk_nand_set_command(0xD6);
3223 mtk_nand_set_command(0x3B);
3224 mtk_nand_set_command(0xB9);
3225 for(count
= 0; count
< 3; count
++)
3227 mtk_nand_set_command(0x53);
3228 mtk_nand_set_address(cmd_reg
[count
], 0, 1, 0);
3229 DRV_WriteReg(NFI_CON_REG16
, (CON_NFI_BWR
| (1 << CON_NFI_SEC_SHIFT
)));
3230 DRV_WriteReg(NFI_STRDATA_REG16
, 1);
3232 WAIT_NFI_PIO_READY(timeout
);
3234 DRV_WriteReg32(NFI_DATAW_REG32
, (((sandisk_19nm_rr_table
[retryCount
] & 0xF000) >> 8) | ((sandisk_19nm_rr_table
[retryCount
] & 0x00F0) >> 4)));
3236 DRV_WriteReg32(NFI_DATAW_REG32
, ((sandisk_19nm_rr_table
[retryCount
] & 0x000F) << 4));
3238 DRV_WriteReg32(NFI_DATAW_REG32
, ((sandisk_19nm_rr_table
[retryCount
] & 0x0F00) >> 4));
3245 mtk_nand_set_command(0xB6);
3248 DRV_WriteReg32(NFI_ACCCON_REG32
, acccon
);
3251 static void mtk_nand_sandisk_19nm_rrtry(struct mtd_info
*mtd
,flashdev_info_t deviceinfo
, u32 retryCount
, bool defValue
)
3253 if((retryCount
== 0) && (!defValue
))
3254 sandisk_19nm_rr_init();
3255 sandisk_19nm_rr_loading(retryCount
, defValue
);
3258 #define HYNIX_RR_TABLE_SIZE (1026) //hynix read retry table size
3259 #define SINGLE_RR_TABLE_SIZE (64)
3261 #define READ_RETRY_STEP (devinfo.feature_set.FeatureSet.readRetryCnt + devinfo.feature_set.FeatureSet.readRetryStart) // 8 step or 12 step to fix read retry table
3262 #define HYNIX_16NM_RR_TABLE_SIZE ((READ_RETRY_STEP == 12)?(784):(528)) //hynix read retry table size
3263 #define SINGLE_RR_TABLE_16NM_SIZE ((READ_RETRY_STEP == 12)?(48):(32))
3265 u8 nand_hynix_rr_table
[(HYNIX_RR_TABLE_SIZE
+16)/16*16]; //align as 16 byte
3267 #define NAND_HYX_RR_TBL_BUF nand_hynix_rr_table
3269 static u8 real_hynix_rr_table_idx
= 0;
3270 static u32 g_hynix_retry_count
= 0;
3272 static bool hynix_rr_table_select(u8 table_index
, flashdev_info_t
*deviceinfo
)
3275 u32 table_size
= (deviceinfo
->feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX_16NM
)?SINGLE_RR_TABLE_16NM_SIZE
: SINGLE_RR_TABLE_SIZE
;
3277 for(i
= 0; i
< table_size
; i
++)
3279 u8
*temp_rr_table
= (u8
*)NAND_HYX_RR_TBL_BUF
+table_size
*table_index
*2+2;
3280 u8
*temp_inversed_rr_table
= (u8
*)NAND_HYX_RR_TBL_BUF
+table_size
*table_index
*2+table_size
+2;
3281 if(deviceinfo
->feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX_16NM
)
3283 temp_rr_table
+= 14;
3284 temp_inversed_rr_table
+= 14;
3286 if(0xFF != (temp_rr_table
[i
] ^ temp_inversed_rr_table
[i
]))
3287 return FALSE
; // error table
3290 if(deviceinfo
->feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX_16NM
)
3294 for(i
= 0; i
< table_size
; i
++)
3296 printk("%02X ", NAND_HYX_RR_TBL_BUF
[i
]);
3300 return TRUE
; // correct table
3303 static void HYNIX_RR_TABLE_READ(flashdev_info_t
*deviceinfo
)
3306 u32 read_count
= 0, max_count
= HYNIX_RR_TABLE_SIZE
;
3307 u32 timeout
= 0xffff;
3308 u8
* rr_table
= (u8
*)(NAND_HYX_RR_TBL_BUF
);
3310 u8 add_reg1
[3] = {0xFF, 0xCC};
3311 u8 data_reg1
[3] = {0x40, 0x4D};
3312 u8 cmd_reg
[6] = {0x16, 0x17, 0x04, 0x19, 0x00};
3313 u8 add_reg2
[6] = {0x00, 0x00, 0x00, 0x02, 0x00};
3314 bool RR_TABLE_EXIST
= TRUE
;
3315 if(deviceinfo
->feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX_16NM
)
3319 data_reg1
[1] = 0x52;
3320 max_count
= HYNIX_16NM_RR_TABLE_SIZE
;
3321 if(READ_RETRY_STEP
== 12)
3326 mtk_nand_device_reset();
3327 // take care under sync mode. need change nand device inferface xiaolei
3331 DRV_WriteReg(NFI_CNFG_REG16
, (CNFG_OP_CUST
| CNFG_BYTE_RW
));
3333 mtk_nand_set_command(0x36);
3335 for(; read_count
< 2; read_count
++)
3337 mtk_nand_set_address(add_reg1
[read_count
],0,1,0);
3338 DRV_WriteReg(NFI_CON_REG16
, (CON_NFI_BWR
| (1 << CON_NFI_SEC_SHIFT
)));
3339 DRV_WriteReg(NFI_STRDATA_REG16
, 1);
3341 WAIT_NFI_PIO_READY(timeout
);
3342 DRV_WriteReg32(NFI_DATAW_REG32
, data_reg1
[read_count
]);
3346 for(read_count
= 0; read_count
< 5; read_count
++)
3348 mtk_nand_set_command(cmd_reg
[read_count
]);
3350 for(read_count
= 0; read_count
< 5; read_count
++)
3352 mtk_nand_set_address(add_reg2
[read_count
],0,1,0);
3354 mtk_nand_set_command(0x30);
3355 DRV_WriteReg(NFI_CNRNB_REG16
, 0xF1);
3357 while(!(DRV_Reg32(NFI_STA_REG32
) & STA_NAND_BUSY_RETURN
) && (timeout
--));
3359 reg_val
= (CNFG_OP_CUST
| CNFG_BYTE_RW
| CNFG_READ_EN
);
3360 DRV_WriteReg(NFI_CNFG_REG16
, reg_val
);
3361 DRV_WriteReg(NFI_CON_REG16
, (CON_NFI_BRD
| (2<< CON_NFI_SEC_SHIFT
)));
3362 DRV_WriteReg(NFI_STRDATA_REG16
, 0x1);
3364 read_count
= 0; // how????
3365 while ((read_count
< max_count
) && timeout
)
3367 WAIT_NFI_PIO_READY(timeout
);
3368 *rr_table
++ = (U8
)DRV_Reg32(NFI_DATAR_REG32
);
3373 mtk_nand_device_reset();
3374 // take care under sync mode. need change nand device inferface xiaolei
3376 reg_val
= (CNFG_OP_CUST
| CNFG_BYTE_RW
);
3377 if(deviceinfo
->feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX_16NM
)
3379 DRV_WriteReg(NFI_CNFG_REG16
, reg_val
);
3380 mtk_nand_set_command(0x36);
3381 mtk_nand_set_address(0x38,0,1,0);
3382 DRV_WriteReg(NFI_CON_REG16
, (CON_NFI_BWR
| (1 << CON_NFI_SEC_SHIFT
)));
3383 DRV_WriteReg(NFI_STRDATA_REG16
, 1);
3384 WAIT_NFI_PIO_READY(timeout
);
3385 DRV_WriteReg32(NFI_DATAW_REG32
, 0x00);
3387 mtk_nand_set_command(0x16);
3388 mtk_nand_set_command(0x00);
3389 mtk_nand_set_address(0x00,0,1,0);//dummy read, add don't care
3390 mtk_nand_set_command(0x30);
3393 DRV_WriteReg(NFI_CNFG_REG16
, reg_val
);
3394 mtk_nand_set_command(0x38);
3397 while(!(DRV_Reg32(NFI_STA_REG32
) & STA_NAND_BUSY_RETURN
) && (timeout
--));
3398 rr_table
= (u8
*)(NAND_HYX_RR_TBL_BUF
);
3399 if(deviceinfo
->feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX
)
3401 if((rr_table
[0] != 8) || (rr_table
[1] != 8))
3403 RR_TABLE_EXIST
= FALSE
;
3407 else if(deviceinfo
->feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX_16NM
)
3409 for(read_count
=0;read_count
<8;read_count
++)
3411 if((rr_table
[read_count
] != 8) || (rr_table
[read_count
+8] != 4))
3413 RR_TABLE_EXIST
= FALSE
;
3420 for(table_index
= 0 ;table_index
< 8; table_index
++)
3422 if(hynix_rr_table_select(table_index
, deviceinfo
))
3424 real_hynix_rr_table_idx
= table_index
;
3425 MSG(INIT
, "Hynix rr_tbl_id %d\n",real_hynix_rr_table_idx
);
3429 if(table_index
== 8)
3436 MSG(INIT
, "Hynix RR table index error!\n");
3440 static void HYNIX_Set_RR_Para(u32 rr_index
, flashdev_info_t
*deviceinfo
)
3444 u8 count
, max_count
= 8;
3445 u8 add_reg
[9] = {0xCC, 0xBF, 0xAA, 0xAB, 0xCD, 0xAD, 0xAE, 0xAF};
3446 u8
*hynix_rr_table
= (u8
*)NAND_HYX_RR_TBL_BUF
+SINGLE_RR_TABLE_SIZE
*real_hynix_rr_table_idx
*2+2;
3447 if(deviceinfo
->feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX_16NM
)
3449 add_reg
[0] = 0x38; //0x38, 0x39, 0x3A, 0x3B
3450 for(count
=1; count
< 4; count
++)
3452 add_reg
[count
] = add_reg
[0] + count
;
3454 hynix_rr_table
+= 14;
3459 DRV_WriteReg(NFI_CNFG_REG16
, (CNFG_OP_CUST
| CNFG_BYTE_RW
));
3460 //mtk_nand_set_command(0x36);
3462 for(count
= 0; count
< max_count
; count
++)
3464 mtk_nand_set_command(0x36);
3465 mtk_nand_set_address(add_reg
[count
], 0, 1, 0);
3466 DRV_WriteReg(NFI_CON_REG16
, (CON_NFI_BWR
| (1 << CON_NFI_SEC_SHIFT
)));
3467 DRV_WriteReg(NFI_STRDATA_REG16
, 1);
3469 WAIT_NFI_PIO_READY(timeout
);
3472 printk("HYNIX_Set_RR_Para timeout\n");
3475 DRV_WriteReg32(NFI_DATAW_REG32
, hynix_rr_table
[rr_index
*max_count
+ count
]);
3478 mtk_nand_set_command(0x16);
3481 static void HYNIX_Get_RR_Para(u32 rr_index
, flashdev_info_t
*deviceinfo
)
3485 u8 count
, max_count
= 8;
3486 u8 add_reg
[9] = {0xCC, 0xBF, 0xAA, 0xAB, 0xCD, 0xAD, 0xAE, 0xAF};
3487 u8
*hynix_rr_table
= (u8
*)NAND_HYX_RR_TBL_BUF
+SINGLE_RR_TABLE_SIZE
*real_hynix_rr_table_idx
*2+2;
3488 if(deviceinfo
->feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX_16NM
)
3490 add_reg
[0] = 0x38; //0x38, 0x39, 0x3A, 0x3B
3491 for(count
=1; count
< 4; count
++)
3493 add_reg
[count
] = add_reg
[0] + count
;
3495 hynix_rr_table
+= 14;
3500 DRV_WriteReg(NFI_CNFG_REG16
, (CNFG_OP_CUST
| CNFG_BYTE_RW
| CNFG_READ_EN
));
3501 //mtk_nand_set_command(0x37);
3503 for(count
= 0; count
< max_count
; count
++)
3505 mtk_nand_set_command(0x37);
3506 mtk_nand_set_address(add_reg
[count
], 0, 1, 0);
3508 DRV_WriteReg(NFI_CON_REG16
, (CON_NFI_SRD
| (1 << CON_NFI_NOB_SHIFT
)));
3509 DRV_WriteReg(NFI_STRDATA_REG16
, 1);
3512 WAIT_NFI_PIO_READY(timeout
);
3515 printk("HYNIX_Get_RR_Para timeout\n");
3517 //DRV_WriteReg32(NFI_DATAW_REG32, hynix_rr_table[rr_index*max_count + count]);
3518 printk("Get[%02X]%02X\n",add_reg
[count
], DRV_Reg8(NFI_DATAR_REG32
));
3523 static void mtk_nand_hynix_rrtry(struct mtd_info
*mtd
, flashdev_info_t deviceinfo
, u32 retryCount
, bool defValue
)
3525 if(defValue
== FALSE
)
3527 if(g_hynix_retry_count
== READ_RETRY_STEP
)
3529 g_hynix_retry_count
= 0;
3531 printk("Hynix Retry %d\n", g_hynix_retry_count
);
3532 HYNIX_Set_RR_Para(g_hynix_retry_count
, &deviceinfo
);
3533 //HYNIX_Get_RR_Para(g_hynix_retry_count, &deviceinfo);
3534 g_hynix_retry_count
++;
3538 static void mtk_nand_hynix_16nm_rrtry(struct mtd_info
*mtd
, flashdev_info_t deviceinfo
, u32 retryCount
, bool defValue
)
3540 if(defValue
== FALSE
)
3542 if(g_hynix_retry_count
== READ_RETRY_STEP
)
3544 g_hynix_retry_count
= 0;
3546 printk("Hynix 16nm Retry %d\n", g_hynix_retry_count
);
3547 HYNIX_Set_RR_Para(g_hynix_retry_count
, &deviceinfo
);
3549 //HYNIX_Get_RR_Para(g_hynix_retry_count, &deviceinfo);
3550 g_hynix_retry_count
++;
3556 u32 special_rrtry_setting
[36]=
3558 0x00000000,0x7C00007C,0x787C0004,0x74780078,
3559 0x7C007C08,0x787C7C00,0x74787C7C,0x70747C00,
3560 0x7C007800,0x787C7800,0x74787800,0x70747800,
3561 0x6C707800,0x00040400,0x7C000400,0x787C040C,
3562 0x7478040C,0x7C000810,0x00040810,0x04040C0C,
3563 0x00040C10,0x00081014,0x000C1418,0x7C040C0C,
3564 0x74787478,0x70747478,0x6C707478,0x686C7478,
3565 0x74787078,0x70747078,0x686C7078,0x6C707078,
3566 0x6C706C78,0x686C6C78,0x64686C78,0x686C6874,
3570 static u32
mtk_nand_rrtry_setting(flashdev_info_t deviceinfo
, enum readRetryType type
, u32 retryStart
, u32 loopNo
)
3573 //if(RTYPE_MICRON == type || RTYPE_SANDISK== type || RTYPE_TOSHIBA== type || RTYPE_HYNIX== type)
3575 if(retryStart
!= 0xFFFFFFFF)
3577 value
= retryStart
+loopNo
;
3581 value
= special_rrtry_setting
[loopNo
];
3588 typedef u32 (*rrtryFunctionType
)(struct mtd_info
*mtd
,flashdev_info_t deviceinfo
, u32 feature
, bool defValue
);
3590 static rrtryFunctionType rtyFuncArray
[]=
3592 mtk_nand_micron_rrtry
,
3593 mtk_nand_sandisk_rrtry
,
3594 mtk_nand_sandisk_19nm_rrtry
,
3595 mtk_nand_toshiba_rrtry
,
3596 mtk_nand_hynix_rrtry
,
3597 mtk_nand_hynix_16nm_rrtry
3601 static void mtk_nand_rrtry_func(struct mtd_info
*mtd
,flashdev_info_t deviceinfo
, u32 feature
, bool defValue
)
3603 rtyFuncArray
[deviceinfo
.feature_set
.FeatureSet
.rtype
](mtd
,deviceinfo
, feature
,defValue
);
3606 /******************************************************************************
3607 * mtk_nand_exec_read_page
3610 * Read a page data !
3613 * struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize,
3614 * u8* pPageBuf, u8* pFDMBuf
3622 ******************************************************************************/
3623 int mtk_nand_exec_read_page(struct mtd_info
*mtd
, u32 u4RowAddr
, u32 u4PageSize
, u8
* pPageBuf
, u8
* pFDMBuf
)
3626 int bRet
= ERR_RTN_SUCCESS
;
3627 struct nand_chip
*nand
= mtd
->priv
;
3628 u32 u4SecNum
= u4PageSize
>> host
->hw
->nand_sec_shift
;
3629 u32 backup_corrected
, backup_failed
;
3630 bool readRetry
= FALSE
;
3633 u32 tempBitMap
, bitMap
, i
;
3635 struct timeval pfm_time_read
;
3638 unsigned short PageFmt_Reg
= 0;
3639 unsigned int NAND_ECC_Enc_Reg
= 0;
3640 unsigned int NAND_ECC_Dec_Reg
= 0;
3642 //MSG(INIT, "mtk_nand_exec_read_page, host->hw->nand_sec_shift: %d\n", host->hw->nand_sec_shift);
3643 //MSG(INIT, "mtk_nand_exec_read_page,u4RowAddr: 0x%x\n", u4RowAddr);
3644 PFM_BEGIN(pfm_time_read
);
3647 if (((u32
) pPageBuf
% 16) && local_buffer_16_align
)
3649 buf
= local_buffer_16_align
;
3652 if(virt_addr_valid (pPageBuf
)==0)
3653 { // It should be allocated by vmalloc
3654 buf
= local_buffer_16_align
;
3661 backup_corrected
= mtd
->ecc_stats
.corrected
;
3662 backup_failed
= mtd
->ecc_stats
.failed
;
3668 u4RowAddr
= mtk_nand_cs_on(nand
, NFI_TRICKY_CS
, u4RowAddr
);
3673 if(use_randomizer
&& u4RowAddr
>= RAND_START_ADDR
)
3674 { mtk_nand_turn_on_randomizer(u4RowAddr
, 0, 0);}
3675 else if(pre_randomizer
&& u4RowAddr
< RAND_START_ADDR
)
3676 { mtk_nand_turn_on_randomizer(u4RowAddr
, 0, 0);}
3677 if (mtk_nand_ready_for_read(nand
, u4RowAddr
, 0, u4SecNum
, true, buf
))
3679 if (!mtk_nand_read_page_data(mtd
, buf
, u4PageSize
))
3681 MSG(INIT
, "mtk_nand_read_page_data fail\n");
3682 bRet
= ERR_RTN_FAIL
;
3685 if (!mtk_nand_status_ready(STA_NAND_BUSY
))
3687 MSG(INIT
, "mtk_nand_status_ready fail\n");
3688 bRet
= ERR_RTN_FAIL
;
3692 if (!mtk_nand_check_dececc_done(u4SecNum
))
3694 MSG(INIT
, "mtk_nand_check_dececc_done fail\n");
3695 bRet
= ERR_RTN_FAIL
;
3698 mtk_nand_read_fdm_data(pFDMBuf
, u4SecNum
);
3701 if (!mtk_nand_check_bch_error(mtd
, buf
, pFDMBuf
,u4SecNum
- 1, u4RowAddr
, &tempBitMap
))
3703 if(devinfo
.vendor
!= VEND_NONE
){
3706 MSG(INIT
, "mtk_nand_check_bch_error fail, retryCount:%d\n",retryCount
);
3707 bRet
= ERR_RTN_BCH_FAIL
;
3711 if(0 != (DRV_Reg32(NFI_STA_REG32
) & STA_READ_EMPTY
)) // if empty
3715 MSG(INIT
,"NFI read retry read empty page, return as uncorrectable\n");
3716 mtd
->ecc_stats
.failed
+=u4SecNum
;
3717 bRet
= ERR_RTN_BCH_FAIL
;
3722 mtk_nand_stop_read();
3724 if(use_randomizer
&& u4RowAddr
>= RAND_START_ADDR
)
3725 { mtk_nand_turn_off_randomizer();}
3726 else if(pre_randomizer
&& u4RowAddr
< RAND_START_ADDR
)
3727 { mtk_nand_turn_off_randomizer();}
3729 if (bRet
== ERR_RTN_BCH_FAIL
)
3731 tempBitMap
-= (tempBitMap
&bitMap
);
3734 MSG(INIT
, "read retry has partial data correct 0x%x\n",tempBitMap
);
3735 for(i
= 0; i
< u4SecNum
; i
++)
3737 if((tempBitMap
& (1 << i
)) != 0)
3739 memcpy((temp_buffer_16_align
+(u4SecSize
*i
)),(buf
+(u4SecSize
*i
)),u4SecSize
);
3740 memcpy((temp_buffer_16_align
+mtd
->writesize
+(8*i
)),(pFDMBuf
+(8*i
)),8);
3743 bitMap
|= tempBitMap
;
3745 if(bitMap
== ((1 << u4SecNum
) - 1))
3747 MSG(INIT
, "read retry has reformat the page data correctly @ page 0x%x\n",u4RowAddr
);
3748 memcpy(buf
,temp_buffer_16_align
,mtd
->writesize
);
3749 memcpy(pFDMBuf
,(temp_buffer_16_align
+mtd
->writesize
),8*u4SecNum
);
3750 mtd
->ecc_stats
.corrected
++;
3751 mtd
->ecc_stats
.failed
= backup_failed
;
3752 bRet
= ERR_RTN_SUCCESS
;
3756 if (bRet
== ERR_RTN_BCH_FAIL
)
3760 //feature= devinfo.feature_set.FeatureSet.readRetryStart+retryCount;
3761 feature
= mtk_nand_rrtry_setting(devinfo
, devinfo
.feature_set
.FeatureSet
.rtype
,devinfo
.feature_set
.FeatureSet
.readRetryStart
,retryCount
);
3762 if(retryCount
< devinfo
.feature_set
.FeatureSet
.readRetryCnt
)
3764 mtd
->ecc_stats
.corrected
= backup_corrected
;
3765 mtd
->ecc_stats
.failed
= backup_failed
;
3766 mtk_nand_rrtry_func(mtd
,devinfo
,feature
,FALSE
);
3771 feature
= devinfo
.feature_set
.FeatureSet
.readRetryDefault
;
3772 // sandisk case 2/3/4
3773 if((devinfo
.feature_set
.FeatureSet
.rtype
== RTYPE_SANDISK
) && (g_sandisk_retry_case
< 3))
3775 g_sandisk_retry_case
++;
3776 printk("Sandisk read retry case#%d\n", g_sandisk_retry_case
);
3778 mtd
->ecc_stats
.corrected
= backup_corrected
;
3779 mtd
->ecc_stats
.failed
= backup_failed
;
3780 mtk_nand_rrtry_func(mtd
,devinfo
,feature
,FALSE
);
3781 //if((g_sandisk_retry_case == 0) || (g_sandisk_retry_case == 2))
3783 // mtk_nand_set_command(0x26);
3788 mtk_nand_rrtry_func(mtd
,devinfo
,feature
,TRUE
);
3790 g_sandisk_retry_case
= 0;
3793 if((g_sandisk_retry_case
== 1) || (g_sandisk_retry_case
== 3))
3795 mtk_nand_set_command(0x26);
3796 printk("Case1#3# Set cmd 26\n");
3801 if((retryCount
!= 0) && MLC_DEVICE
)
3803 u32 feature
= devinfo
.feature_set
.FeatureSet
.readRetryDefault
;
3804 mtk_nand_rrtry_func(mtd
,devinfo
,feature
,TRUE
);
3807 g_sandisk_retry_case
= 0;
3809 if(TRUE
== readRetry
)
3810 bRet
= ERR_RTN_SUCCESS
;
3814 u32 feature
= devinfo
.feature_set
.FeatureSet
.readRetryDefault
;
3815 if(bRet
== ERR_RTN_SUCCESS
)
3817 MSG(INIT
, "u4RowAddr:0x%x read retry pass, retrycnt:%d ENUM0:%x,ENUM1:%x,mtd_ecc(A):%x,mtd_ecc(B):%x \n",u4RowAddr
,retryCount
,DRV_Reg32(ECC_DECENUM1_REG32
),DRV_Reg32(ECC_DECENUM0_REG32
),mtd
->ecc_stats
.failed
,backup_failed
);
3818 mtd
->ecc_stats
.corrected
++;
3819 if((devinfo
.feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX_16NM
) || (devinfo
.feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX
))
3821 g_hynix_retry_count
--;
3826 MSG(INIT
, "u4RowAddr:0x%x read retry fail, mtd_ecc(A):%x ,fail, mtd_ecc(B):%x\n",u4RowAddr
,mtd
->ecc_stats
.failed
,backup_failed
);
3828 mtk_nand_rrtry_func(mtd
,devinfo
,feature
,TRUE
);
3829 g_sandisk_retry_case
= 0;
3832 if (buf
== local_buffer_16_align
)
3834 memcpy(pPageBuf
, buf
, u4PageSize
);
3836 if(bRet
!= ERR_RTN_SUCCESS
)
3838 MSG(INIT
,"ECC uncorrectable , fake buffer returned\n");
3839 memset(pPageBuf
,0xff,u4PageSize
);
3840 memset(pFDMBuf
,0xff,u4SecNum
*8);
3843 PFM_END_R(pfm_time_read
, u4PageSize
+ 32);
3848 bool mtk_nand_exec_read_sector(struct mtd_info
*mtd
, u32 u4RowAddr
, u32 u4ColAddr
, u32 u4PageSize
, u8
* pPageBuf
, u8
* pFDMBuf
, int subpageno
)
3851 int bRet
= ERR_RTN_SUCCESS
;
3852 struct nand_chip
*nand
= mtd
->priv
;
3853 u32 u4SecNum
= subpageno
;
3854 u32 backup_corrected
, backup_failed
;
3855 bool readRetry
= FALSE
;
3859 struct timeval pfm_time_read
;
3862 unsigned short PageFmt_Reg
= 0;
3863 unsigned int NAND_ECC_Enc_Reg
= 0;
3864 unsigned int NAND_ECC_Dec_Reg
= 0;
3866 //MSG(INIT, "mtk_nand_exec_read_page, host->hw->nand_sec_shift: %d\n", host->hw->nand_sec_shift);
3868 PFM_BEGIN(pfm_time_read
);
3870 if (((u32
) pPageBuf
% 16) && local_buffer_16_align
)
3872 buf
= local_buffer_16_align
;
3875 if(virt_addr_valid (pPageBuf
)==0)
3876 { // It should be allocated by vmalloc
3877 buf
= local_buffer_16_align
;
3884 backup_corrected
= mtd
->ecc_stats
.corrected
;
3885 backup_failed
= mtd
->ecc_stats
.failed
;
3889 u4RowAddr
= mtk_nand_cs_on(nand
, NFI_TRICKY_CS
, u4RowAddr
);
3893 if(use_randomizer
&& u4RowAddr
>= RAND_START_ADDR
)
3894 { mtk_nand_turn_on_randomizer(u4RowAddr
, 0, 0);}
3895 else if(pre_randomizer
&& u4RowAddr
< RAND_START_ADDR
)
3896 { mtk_nand_turn_on_randomizer(u4RowAddr
, 0, 0);}
3897 if (mtk_nand_ready_for_read(nand
, u4RowAddr
, u4ColAddr
, u4SecNum
, true, buf
))
3899 if (!mtk_nand_read_page_data(mtd
, buf
, u4PageSize
))
3901 MSG(INIT
, "mtk_nand_read_page_data fail\n");
3902 bRet
= ERR_RTN_FAIL
;
3905 if (!mtk_nand_status_ready(STA_NAND_BUSY
))
3907 MSG(INIT
, "mtk_nand_status_ready fail\n");
3908 bRet
= ERR_RTN_FAIL
;
3912 if (!mtk_nand_check_dececc_done(u4SecNum
))
3914 MSG(INIT
, "mtk_nand_check_dececc_done fail\n");
3915 bRet
= ERR_RTN_FAIL
;
3918 mtk_nand_read_fdm_data(pFDMBuf
, u4SecNum
);
3921 if (!mtk_nand_check_bch_error(mtd
, buf
, pFDMBuf
,u4SecNum
- 1, u4RowAddr
, NULL
))
3923 if(devinfo
.vendor
!= VEND_NONE
){
3926 MSG(INIT
, "mtk_nand_check_bch_error fail, retryCount:%d\n",retryCount
);
3927 bRet
= ERR_RTN_BCH_FAIL
;
3931 if(0 != (DRV_Reg32(NFI_STA_REG32
) & STA_READ_EMPTY
)) // if empty
3935 MSG(INIT
,"NFI read retry read empty page, return as uncorrectable\n");
3936 mtd
->ecc_stats
.failed
+=u4SecNum
;
3937 bRet
= ERR_RTN_BCH_FAIL
;
3942 mtk_nand_stop_read();
3944 if(use_randomizer
&& u4RowAddr
>= RAND_START_ADDR
)
3945 { mtk_nand_turn_off_randomizer();}
3946 else if(pre_randomizer
&& u4RowAddr
< RAND_START_ADDR
)
3947 { mtk_nand_turn_off_randomizer();}
3948 if (bRet
== ERR_RTN_BCH_FAIL
)
3950 u32 feature
= mtk_nand_rrtry_setting(devinfo
, devinfo
.feature_set
.FeatureSet
.rtype
,devinfo
.feature_set
.FeatureSet
.readRetryStart
,retryCount
);
3951 if(retryCount
< devinfo
.feature_set
.FeatureSet
.readRetryCnt
)
3953 mtd
->ecc_stats
.corrected
= backup_corrected
;
3954 mtd
->ecc_stats
.failed
= backup_failed
;
3955 mtk_nand_rrtry_func(mtd
,devinfo
,feature
,FALSE
);
3960 feature
= devinfo
.feature_set
.FeatureSet
.readRetryDefault
;
3961 // sandisk case 2/3/4
3962 if((devinfo
.feature_set
.FeatureSet
.rtype
== RTYPE_SANDISK
) && (g_sandisk_retry_case
< 3))
3964 g_sandisk_retry_case
++;
3965 printk("Sandisk read retry case#%d\n", g_sandisk_retry_case
);
3967 mtd
->ecc_stats
.corrected
= backup_corrected
;
3968 mtd
->ecc_stats
.failed
= backup_failed
;
3969 mtk_nand_rrtry_func(mtd
,devinfo
,feature
,FALSE
);
3970 //if((g_sandisk_retry_case == 0) || (g_sandisk_retry_case == 2))
3972 // mtk_nand_set_command(0x26);
3977 mtk_nand_rrtry_func(mtd
,devinfo
,feature
,TRUE
);
3979 g_sandisk_retry_case
= 0;
3982 if((g_sandisk_retry_case
== 1) || (g_sandisk_retry_case
== 3))
3984 mtk_nand_set_command(0x26);
3985 printk("Case1#3# Set cmd 26\n");
3990 if((retryCount
!= 0) && MLC_DEVICE
)
3992 u32 feature
= devinfo
.feature_set
.FeatureSet
.readRetryDefault
;
3993 mtk_nand_rrtry_func(mtd
,devinfo
,feature
,TRUE
);
3996 g_sandisk_retry_case
= 0;
3998 if(TRUE
== readRetry
)
3999 bRet
= ERR_RTN_SUCCESS
;
4003 u32 feature
= devinfo
.feature_set
.FeatureSet
.readRetryDefault
;
4004 if(bRet
== ERR_RTN_SUCCESS
)
4006 MSG(INIT
, "u4RowAddr:0x%x read retry pass, retrycnt:%d ENUM0:%x,ENUM1:%x,\n",u4RowAddr
,retryCount
,DRV_Reg32(ECC_DECENUM1_REG32
),DRV_Reg32(ECC_DECENUM0_REG32
));
4007 mtd
->ecc_stats
.corrected
++;
4008 if((devinfo
.feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX_16NM
) || (devinfo
.feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX
))
4010 g_hynix_retry_count
--;
4013 mtk_nand_rrtry_func(mtd
,devinfo
,feature
,TRUE
);
4014 g_sandisk_retry_case
= 0;
4016 if (buf
== local_buffer_16_align
)
4017 memcpy(pPageBuf
, buf
, u4PageSize
);
4019 PFM_END_R(pfm_time_read
, u4PageSize
+ 32);
4020 if(bRet
!= ERR_RTN_SUCCESS
)
4022 MSG(INIT
,"ECC uncorrectable , fake buffer returned\n");
4023 memset(pPageBuf
,0xff,u4PageSize
);
4024 memset(pFDMBuf
,0xff,u4SecNum
*8);
4029 /******************************************************************************
4030 * mtk_nand_exec_write_page
4033 * Write a page data !
4036 * struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize,
4037 * u8* pPageBuf, u8* pFDMBuf
4045 ******************************************************************************/
4046 int mtk_nand_exec_write_page(struct mtd_info
*mtd
, u32 u4RowAddr
, u32 u4PageSize
, u8
* pPageBuf
, u8
* pFDMBuf
)
4048 struct nand_chip
*chip
= mtd
->priv
;
4049 u32 u4SecNum
= u4PageSize
>> host
->hw
->nand_sec_shift
;
4052 #ifdef PWR_LOSS_SPOH
4054 struct timeval pl_time_write
;
4055 suseconds_t duration
;
4059 val
= devinfo
.feature_set
.FeatureSet
.readRetryDefault
;
4060 mtk_nand_SetFeature(mtd
, devinfo
.feature_set
.FeatureSet
.sfeatureCmd
,\
4061 devinfo
.feature_set
.FeatureSet
.readRetryAddress
,\
4063 mtk_nand_GetFeature(mtd
, devinfo
.feature_set
.FeatureSet
.gfeatureCmd
,\
4064 devinfo
.feature_set
.FeatureSet
.readRetryAddress
,\
4066 if((val
&0xFF) != (devinfo
.feature_set
.FeatureSet
.readRetryDefault
&0xFF))
4068 MSG(INIT
, "mtk_nand_exec_write_page check read retry defalut value fail 0x%x\n",val
);
4072 //MSG(INIT, "mtk_nand_exec_write_page, page: 0x%x\n", u4RowAddr);
4076 u4RowAddr
= mtk_nand_cs_on(chip
, NFI_TRICKY_CS
, u4RowAddr
);
4080 if(use_randomizer
&& u4RowAddr
>= RAND_START_ADDR
)
4081 { mtk_nand_turn_on_randomizer(u4RowAddr
, 1, 0);}
4082 else if(pre_randomizer
&& u4RowAddr
< RAND_START_ADDR
)
4083 { mtk_nand_turn_on_randomizer(u4RowAddr
, 1, 0);}
4085 #ifdef _MTK_NAND_DUMMY_DRIVER_
4086 if (dummy_driver_debug
)
4088 unsigned long long time
= sched_clock();
4089 if (!((time
* 123 + 59) % 32768))
4091 printk(KERN_INFO
"[NAND_DUMMY_DRIVER] Simulate write error at page: 0x%x\n", u4RowAddr
);
4098 struct timeval pfm_time_write
;
4100 PFM_BEGIN(pfm_time_write
);
4101 if (((u32
) pPageBuf
% 16) && local_buffer_16_align
)
4103 printk(KERN_INFO
"Data buffer not 16 bytes aligned: %p\n", pPageBuf
);
4104 memcpy(local_buffer_16_align
, pPageBuf
, mtd
->writesize
);
4105 buf
= local_buffer_16_align
;
4109 if(virt_addr_valid (pPageBuf
)==0)
4110 { // It should be allocated by vmalloc
4111 memcpy(local_buffer_16_align
, pPageBuf
, mtd
->writesize
);
4112 buf
= local_buffer_16_align
;
4120 if (mtk_nand_ready_for_write(chip
, u4RowAddr
, 0, true, buf
))
4122 mtk_nand_write_fdm_data(chip
, pFDMBuf
, u4SecNum
);
4123 (void)mtk_nand_write_page_data(mtd
, buf
, u4PageSize
);
4124 (void)mtk_nand_check_RW_count(u4PageSize
);
4125 mtk_nand_stop_write();
4126 PL_NAND_BEGIN(pl_time_write
);
4127 PL_TIME_RAND_PROG(chip
, u4RowAddr
, time
);
4128 (void)mtk_nand_set_command(NAND_CMD_PAGEPROG
);
4129 PL_NAND_RESET(time
);
4131 #if CFG_PERFLOG_DEBUG
4132 struct timeval stimer
,etimer
;
4133 do_gettimeofday(&stimer
);
4135 while (DRV_Reg32(NFI_STA_REG32
) & STA_NAND_BUSY
) ;
4136 #if CFG_PERFLOG_DEBUG
4137 do_gettimeofday(&etimer
);
4138 //printk("[Bean]Cal_timediff(&etimer,&stimer):0x%x\n", Cal_timediff(&etimer,&stimer));
4139 g_NandPerfLog
.WriteBusyTotalTime
+= Cal_timediff(&etimer
,&stimer
);
4140 g_NandPerfLog
.WriteBusyCount
++;
4146 printk("[Bean]mtk_nand_ready_for_write fail!\n");
4148 PL_NAND_END(pl_time_write
, duration
);
4149 PL_TIME_PROG(duration
);
4150 PFM_END_W(pfm_time_write
, u4PageSize
+ 32);
4152 if(use_randomizer
&& u4RowAddr
>= RAND_START_ADDR
)
4153 { mtk_nand_turn_off_randomizer();}
4154 else if(pre_randomizer
&& u4RowAddr
< RAND_START_ADDR
)
4155 { mtk_nand_turn_off_randomizer();}
4156 status
= chip
->waitfunc(mtd
, chip
);
4157 //printk("[Bean]status:%d\n", status);
4158 if (status
& NAND_STATUS_FAIL
)
4164 /******************************************************************************
4166 * Write a page to a logical address
4168 *****************************************************************************/
4169 static int mtk_nand_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
4170 uint32_t offset
, int data_len
, const uint8_t *buf
,
4171 int oob_required
, int page
, int cached
, int raw
)
4173 // int block_size = 1 << (chip->phys_erase_shift);
4174 int page_per_block
= 1 << (chip
->phys_erase_shift
- chip
->page_shift
);
4178 #if CFG_PERFLOG_DEBUG
4179 struct timeval stimer
,etimer
;
4180 do_gettimeofday(&stimer
);
4182 page_in_block
= mtk_nand_page_transform(mtd
,chip
,page
,&block
,&mapped_block
);
4183 //MSG(INIT,"[WRITE] %d, %d, %d %d\n",mapped_block, block, page_in_block, page_per_block);
4184 // write bad index into oob
4185 if (mapped_block
!= block
)
4187 set_bad_index_to_oob(chip
->oob_poi
, block
);
4190 set_bad_index_to_oob(chip
->oob_poi
, FAKE_INDEX
);
4193 if (mtk_nand_exec_write_page(mtd
, page_in_block
+ mapped_block
* page_per_block
, mtd
->writesize
, (u8
*) buf
, chip
->oob_poi
))
4195 MSG(INIT
, "write fail at block: 0x%x, page: 0x%x\n", mapped_block
, page_in_block
);
4196 if (update_bmt((u64
)((u64
)page_in_block
+ (u64
)mapped_block
* page_per_block
) << chip
->page_shift
, UPDATE_WRITE_FAIL
, (u8
*) buf
, chip
->oob_poi
))
4198 MSG(INIT
, "Update BMT success\n");
4202 MSG(INIT
, "Update BMT fail\n");
4206 #if CFG_PERFLOG_DEBUG
4207 do_gettimeofday(&etimer
);
4208 g_NandPerfLog
.WritePageTotalTime
+= Cal_timediff(&etimer
,&stimer
);
4209 g_NandPerfLog
.WritePageCount
++;
4210 dump_nand_rwcount();
4215 //-------------------------------------------------------------------------------
/*
 * mtk_nand_command_sp - command dispatcher for small-page devices.
 * Latches the requested column/row into globals for the later data phase,
 * then dispatches on the MTD command code.
 * NOTE(review): this span is extraction-garbled — the opening brace, the
 * `switch (command)` line and every case body are missing from this view;
 * only the surviving statements and case labels are shown below.
 */
4217 static void mtk_nand_command_sp(
4218 struct mtd_info *mtd, unsigned int command, int column, int page_addr)
/* Remember the target column/row for the subsequent read/write helpers. */
4220 g_u4ColAddr = column;
4221 g_u4RowAddr = page_addr;
/* Case bodies were dropped by the extraction — TODO recover from original. */
4225 case NAND_CMD_STATUS:
4228 case NAND_CMD_READID:
4231 case NAND_CMD_RESET:
4234 case NAND_CMD_RNDOUT:
4235 case NAND_CMD_RNDOUTSTART:
4236 case NAND_CMD_RNDIN:
4237 case NAND_CMD_CACHEDPROG:
4238 case NAND_CMD_STATUS_MULTI:
4246 /******************************************************************************
4247 * mtk_nand_command_bp
4250 * Handle the commands from MTD !
4253 * struct mtd_info *mtd, unsigned int command, int column, int page_addr
4261 ******************************************************************************/
4262 static void mtk_nand_command_bp(struct mtd_info
*mtd
, unsigned int command
, int column
, int page_addr
)
4264 struct nand_chip
*nand
= mtd
->priv
;
4266 struct timeval pfm_time_erase
;
4269 // int block_size = 1 << (nand->phys_erase_shift);
4270 // int page_per_block = 1 << (nand->phys_erase_shift - nand->page_shift);
4272 // u16 page_in_block;
4273 // u32 mapped_block;
4274 // bool rand= FALSE;
4275 page_addr
= mtk_nand_page_transform(mtd
,nand
,&block
,&mapped_block
);
4276 page_addr
= mapped_block
*page_per_block
+ page_addr
;
4280 case NAND_CMD_SEQIN
:
4281 memset(g_kCMD
.au1OOB
, 0xFF, sizeof(g_kCMD
.au1OOB
));
4282 g_kCMD
.pDataBuf
= NULL
;
4284 g_kCMD
.u4RowAddr
= page_addr
;
4285 g_kCMD
.u4ColAddr
= column
;
4288 case NAND_CMD_PAGEPROG
:
4289 if (g_kCMD
.pDataBuf
|| (0xFF != g_kCMD
.au1OOB
[0]))
4291 u8
*pDataBuf
= g_kCMD
.pDataBuf
? g_kCMD
.pDataBuf
: nand
->buffers
->databuf
;
4292 mtk_nand_exec_write_page(mtd
, g_kCMD
.u4RowAddr
, mtd
->writesize
, pDataBuf
, g_kCMD
.au1OOB
);
4293 g_kCMD
.u4RowAddr
= (u32
) - 1;
4294 g_kCMD
.u4OOBRowAddr
= (u32
) - 1;
4298 case NAND_CMD_READOOB
:
4299 g_kCMD
.u4RowAddr
= page_addr
;
4300 g_kCMD
.u4ColAddr
= column
+ mtd
->writesize
;
4302 g_kCMD
.pureReadOOB
= 1;
4303 g_kCMD
.pureReadOOBNum
+= 1;
4307 case NAND_CMD_READ0
:
4308 g_kCMD
.u4RowAddr
= page_addr
;
4309 g_kCMD
.u4ColAddr
= column
;
4311 g_kCMD
.pureReadOOB
= 0;
4315 case NAND_CMD_ERASE1
:
4316 PFM_BEGIN(pfm_time_erase
);
4317 (void)mtk_nand_reset();
4318 mtk_nand_set_mode(CNFG_OP_ERASE
);
4319 (void)mtk_nand_set_command(NAND_CMD_ERASE1
);
4320 (void)mtk_nand_set_address(0, page_addr
, 0, devinfo
.addr_cycle
- 2);
4323 case NAND_CMD_ERASE2
:
4324 (void)mtk_nand_set_command(NAND_CMD_ERASE2
);
4325 while (DRV_Reg32(NFI_STA_REG32
) & STA_NAND_BUSY
) ;
4326 PFM_END_E(pfm_time_erase
);
4329 case NAND_CMD_STATUS
:
4330 (void)mtk_nand_reset();
4331 if(mtk_nand_israndomizeron())
4333 //g_brandstatus = TRUE;
4334 mtk_nand_turn_off_randomizer();
4336 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_BYTE_RW
);
4337 mtk_nand_set_mode(CNFG_OP_SRD
);
4338 mtk_nand_set_mode(CNFG_READ_EN
);
4339 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
4340 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
4341 (void)mtk_nand_set_command(NAND_CMD_STATUS
);
4342 NFI_CLN_REG32(NFI_CON_REG16
, CON_NFI_NOB_MASK
);
4344 DRV_WriteReg32(NFI_CON_REG16
, CON_NFI_SRD
| (1 << CON_NFI_NOB_SHIFT
));
4345 g_bcmdstatus
= true;
4348 case NAND_CMD_RESET
:
4349 (void)mtk_nand_reset();
4352 case NAND_CMD_READID
:
4353 /* Issue NAND chip reset command */
4354 //NFI_ISSUE_COMMAND (NAND_CMD_RESET, 0, 0, 0, 0);
4356 //timeout = TIMEOUT_4;
4362 /* Disable HW ECC */
4363 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
4364 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
4366 /* Disable 16-bit I/O */
4367 //NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
4369 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_READ_EN
| CNFG_BYTE_RW
);
4370 (void)mtk_nand_reset();
4372 mtk_nand_set_mode(CNFG_OP_SRD
);
4373 (void)mtk_nand_set_command(NAND_CMD_READID
);
4374 (void)mtk_nand_set_address(0, 0, 1, 0);
4375 DRV_WriteReg32(NFI_CON_REG16
, CON_NFI_SRD
);
4376 while (DRV_Reg32(NFI_STA_REG32
) & STA_DATAR_STATE
) ;
4385 /******************************************************************************
4386 * mtk_nand_select_chip
4392 * struct mtd_info *mtd, int chip
4400 ******************************************************************************/
4401 static void mtk_nand_select_chip(struct mtd_info
*mtd
, int chip
)
4403 if (chip
== -1 && false == g_bInitDone
)
4405 struct nand_chip
*nand
= mtd
->priv
;
4407 struct mtk_nand_host
*host
= nand
->priv
;
4408 struct mtk_nand_host_hw
*hw
= host
->hw
;
4409 u32 spare_per_sector
= mtd
->oobsize
/( mtd
->writesize
/hw
->nand_sec_size
);
4411 u32 spare_bit
= PAGEFMT_SPARE_16
;
4412 switch(spare_per_sector
)
4414 #ifndef MTK_COMBO_NAND_SUPPORT
4416 spare_bit
= PAGEFMT_SPARE_16
;
4418 spare_per_sector
= 16;
4423 spare_bit
= PAGEFMT_SPARE_26
;
4425 spare_per_sector
= 26;
4429 if(MLC_DEVICE
== TRUE
)
4430 spare_bit
= PAGEFMT_SPARE_32_1KS
;
4432 spare_bit
= PAGEFMT_SPARE_32
;
4433 spare_per_sector
= 32;
4437 spare_bit
= PAGEFMT_SPARE_40
;
4438 spare_per_sector
= 40;
4442 spare_bit
= PAGEFMT_SPARE_44
;
4443 spare_per_sector
= 44;
4448 spare_bit
= PAGEFMT_SPARE_48
;
4449 spare_per_sector
= 48;
4454 spare_bit
= PAGEFMT_SPARE_50
;
4455 spare_per_sector
= 50;
4461 if(MLC_DEVICE
== TRUE
)
4462 spare_bit
= PAGEFMT_SPARE_52_1KS
;
4464 spare_bit
= PAGEFMT_SPARE_52
;
4465 spare_per_sector
= 32;
4471 spare_bit
= PAGEFMT_SPARE_62
;
4472 spare_per_sector
= 62;
4476 if(MLC_DEVICE
== TRUE
)
4477 spare_bit
= PAGEFMT_SPARE_64_1KS
;
4479 spare_bit
= PAGEFMT_SPARE_64
;
4480 spare_per_sector
= 64;
4484 if(MLC_DEVICE
== TRUE
)
4485 spare_bit
= PAGEFMT_SPARE_72_1KS
;
4486 spare_per_sector
= 72;
4490 if(MLC_DEVICE
== TRUE
)
4491 spare_bit
= PAGEFMT_SPARE_80_1KS
;
4492 spare_per_sector
= 80;
4496 if(MLC_DEVICE
== TRUE
)
4497 spare_bit
= PAGEFMT_SPARE_88_1KS
;
4498 spare_per_sector
= 88;
4503 if(MLC_DEVICE
== TRUE
)
4504 spare_bit
= PAGEFMT_SPARE_96_1KS
;
4505 spare_per_sector
= 96;
4511 if(MLC_DEVICE
== TRUE
)
4512 spare_bit
= PAGEFMT_SPARE_100_1KS
;
4513 spare_per_sector
= 100;
4519 if(MLC_DEVICE
== TRUE
)
4520 spare_bit
= PAGEFMT_SPARE_124_1KS
;
4521 spare_per_sector
= 124;
4524 MSG(INIT
, "[NAND]: NFI not support oobsize: %x\n", spare_per_sector
);
4528 mtd
->oobsize
= spare_per_sector
*(mtd
->writesize
/hw
->nand_sec_size
);
4529 printk("[NAND]select ecc bit:%d, sparesize :%d\n",ecc_bit
,mtd
->oobsize
);
4530 /* Setup PageFormat */
4532 if (16384 == mtd
->writesize
)
4534 NFI_SET_REG16(NFI_PAGEFMT_REG16
, (spare_bit
<< PAGEFMT_SPARE_SHIFT
) | PAGEFMT_16K_1KS
);
4535 nand
->cmdfunc
= mtk_nand_command_bp
;
4536 } else if (8192 == mtd
->writesize
)
4538 NFI_SET_REG16(NFI_PAGEFMT_REG16
, (spare_bit
<< PAGEFMT_SPARE_SHIFT
) | PAGEFMT_8K_1KS
);
4539 nand
->cmdfunc
= mtk_nand_command_bp
;
4540 } else if (4096 == mtd
->writesize
)
4542 if(MLC_DEVICE
== FALSE
)
4543 NFI_SET_REG16(NFI_PAGEFMT_REG16
, (spare_bit
<< PAGEFMT_SPARE_SHIFT
) | PAGEFMT_4K
);
4545 NFI_SET_REG16(NFI_PAGEFMT_REG16
, (spare_bit
<< PAGEFMT_SPARE_SHIFT
) | PAGEFMT_4K_1KS
);
4546 nand
->cmdfunc
= mtk_nand_command_bp
;
4547 } else if (2048 == mtd
->writesize
)
4549 if(MLC_DEVICE
== FALSE
)
4550 NFI_SET_REG16(NFI_PAGEFMT_REG16
, (spare_bit
<< PAGEFMT_SPARE_SHIFT
) | PAGEFMT_2K
);
4552 NFI_SET_REG16(NFI_PAGEFMT_REG16
, (spare_bit
<< PAGEFMT_SPARE_SHIFT
) | PAGEFMT_2K_1KS
);
4553 nand
->cmdfunc
= mtk_nand_command_bp
;
4555 ecc_threshold
= ecc_bit
*4/5;
4556 ECC_Config(hw
,ecc_bit
);
4559 //xiaolei for kernel3.10
4560 nand
->ecc
.strength
= ecc_bit
;
4561 mtd
->bitflip_threshold
= nand
->ecc
.strength
;
4568 #ifdef CFG_FPGA_PLATFORM // FPGA NAND is placed at CS1 not CS0
4569 DRV_WriteReg16(NFI_CSEL_REG16
, 0);
4573 DRV_WriteReg16(NFI_CSEL_REG16
, chip
);
4578 /******************************************************************************
4579 * mtk_nand_read_byte
4582 * Read a byte of data !
4585 * struct mtd_info *mtd
4593 ******************************************************************************/
/******************************************************************************
 * mtk_nand_read_byte - read one byte from the NFI PIO data register.
 * Used for status/ID bytes after NAND_CMD_STATUS / NAND_CMD_READID.
 * NOTE(review): this span is extraction-garbled — the opening brace, the
 * retval declaration, the g_bcmdstatus branch structure and several
 * #else/#endif lines are missing from this view; only the surviving tokens
 * are reproduced below. Do not edit without the original tree.
 ******************************************************************************/
4594 static uint8_t mtk_nand_read_byte(struct mtd_info
*mtd
)
4597 //while(0 == FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)));
4598 /* Check the PIO bit is ready or not */
4599 u32 timeout
= TIMEOUT_4
;
4601 WAIT_NFI_PIO_READY(timeout
);
/* First byte out of the PIO data register. */
4603 retval
= DRV_Reg8(NFI_DATAR_REG32
);
4604 MSG(INIT
, "mtk_nand_read_byte (0x%x)\n", retval
);
/* Restore AHB + HW-ECC configuration after the byte-wide status read. */
4608 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
4609 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
4610 g_bcmdstatus
= false;
/* PIO-ready poll with timeout before touching the data register. */
4617 if (!mtk_nand_pio_ready())
4619 printk("pio ready timeout\n");
4625 retval
= DRV_Reg8(NFI_DATAR_REG32
);
/* Clear the byte-count field used by the single-byte status read. */
4626 NFI_CLN_REG32(NFI_CON_REG16
, CON_NFI_NOB_MASK
);
4628 #if (__INTERNAL_USE_AHB_MODE__)
4629 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
4633 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
/* presumably the non-HW-ECC path — #else line lost in extraction; confirm */
4636 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
4638 g_bcmdstatus
= false;
4640 retval
= DRV_Reg8(NFI_DATAR_REG32
);
/* Re-enable the randomizer that the status read had to switch off. */
4644 g_brandstatus = FALSE;
4645 mtk_nand_turn_on_randomizer(g_kCMD.u4RowAddr, g_kCMD.u4ColAddr / devinfo.sectorsize, FALSE);
4651 /******************************************************************************
4658 * struct mtd_info *mtd, uint8_t *buf, int len
4666 ******************************************************************************/
4667 static void mtk_nand_read_buf(struct mtd_info
*mtd
, uint8_t * buf
, int len
)
4669 struct nand_chip
*nand
= (struct nand_chip
*)mtd
->priv
;
4670 struct NAND_CMD
*pkCMD
= &g_kCMD
;
4671 u32 u4ColAddr
= pkCMD
->u4ColAddr
;
4672 u32 u4PageSize
= mtd
->writesize
;
4674 if (u4ColAddr
< u4PageSize
)
4676 if ((u4ColAddr
== 0) && (len
>= u4PageSize
))
4678 mtk_nand_exec_read_page(mtd
, pkCMD
->u4RowAddr
, u4PageSize
, buf
, pkCMD
->au1OOB
);
4679 if (len
> u4PageSize
)
4681 u32 u4Size
= min(len
- u4PageSize
, sizeof(pkCMD
->au1OOB
));
4682 memcpy(buf
+ u4PageSize
, pkCMD
->au1OOB
, u4Size
);
4686 mtk_nand_exec_read_page(mtd
, pkCMD
->u4RowAddr
, u4PageSize
, nand
->buffers
->databuf
, pkCMD
->au1OOB
);
4687 memcpy(buf
, nand
->buffers
->databuf
+ u4ColAddr
, len
);
4689 pkCMD
->u4OOBRowAddr
= pkCMD
->u4RowAddr
;
4692 u32 u4Offset
= u4ColAddr
- u4PageSize
;
4693 u32 u4Size
= min(len
- u4Offset
, sizeof(pkCMD
->au1OOB
));
4694 if (pkCMD
->u4OOBRowAddr
!= pkCMD
->u4RowAddr
)
4696 mtk_nand_exec_read_page(mtd
, pkCMD
->u4RowAddr
, u4PageSize
, nand
->buffers
->databuf
, pkCMD
->au1OOB
);
4697 pkCMD
->u4OOBRowAddr
= pkCMD
->u4RowAddr
;
4699 memcpy(buf
, pkCMD
->au1OOB
+ u4Offset
, u4Size
);
4701 pkCMD
->u4ColAddr
+= len
;
4704 /******************************************************************************
4705 * mtk_nand_write_buf
4711 * struct mtd_info *mtd, const uint8_t *buf, int len
4719 ******************************************************************************/
4720 static void mtk_nand_write_buf(struct mtd_info
*mtd
, const uint8_t * buf
, int len
)
4722 struct NAND_CMD
*pkCMD
= &g_kCMD
;
4723 u32 u4ColAddr
= pkCMD
->u4ColAddr
;
4724 u32 u4PageSize
= mtd
->writesize
;
4727 if (u4ColAddr
>= u4PageSize
)
4729 u32 u4Offset
= u4ColAddr
- u4PageSize
;
4730 u8
*pOOB
= pkCMD
->au1OOB
+ u4Offset
;
4731 i4Size
= min(len
, (int)(sizeof(pkCMD
->au1OOB
) - u4Offset
));
4733 for (i
= 0; i
< i4Size
; i
++)
4739 pkCMD
->pDataBuf
= (u8
*) buf
;
4742 pkCMD
->u4ColAddr
+= len
;
4745 /******************************************************************************
4746 * mtk_nand_write_page_hwecc
4749 * Write NAND data with hardware ecc !
4752 * struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf
4760 ******************************************************************************/
4761 static void mtk_nand_write_page_hwecc(struct mtd_info
*mtd
, struct nand_chip
*chip
, const uint8_t *buf
, int oob_required
)
4763 mtk_nand_write_buf(mtd
, buf
, mtd
->writesize
);
4764 mtk_nand_write_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
4767 /******************************************************************************
4768 * mtk_nand_read_page_hwecc
4771 * Read NAND data with hardware ecc !
4774 * struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf
4782 ******************************************************************************/
4783 static int mtk_nand_read_page_hwecc(struct mtd_info
*mtd
, struct nand_chip
*chip
,uint8_t *buf
, int oob_required
, int page
)
4786 mtk_nand_read_buf(mtd
, buf
, mtd
->writesize
);
4787 mtk_nand_read_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
4789 struct NAND_CMD
*pkCMD
= &g_kCMD
;
4790 u32 u4ColAddr
= pkCMD
->u4ColAddr
;
4791 u32 u4PageSize
= mtd
->writesize
;
4795 mtk_nand_exec_read_page(mtd
, pkCMD
->u4RowAddr
, u4PageSize
, buf
, chip
->oob_poi
);
4796 pkCMD
->u4ColAddr
+= u4PageSize
+ mtd
->oobsize
;
4802 /******************************************************************************
4804 * Read a page to a logical address
4806 *****************************************************************************/
4807 static int mtk_nand_read_page(struct mtd_info
*mtd
, struct nand_chip
*chip
, u8
* buf
, int page
)
4809 // int block_size = 1 << (chip->phys_erase_shift);
4810 int page_per_block
= 1 << (chip
->phys_erase_shift
- chip
->page_shift
);
4811 // int page_per_block1 = page_per_block;
4815 int bRet
= ERR_RTN_SUCCESS
;
4816 #if CFG_PERFLOG_DEBUG
4817 struct timeval stimer
,etimer
;
4818 do_gettimeofday(&stimer
);
4820 page_in_block
= mtk_nand_page_transform(mtd
,chip
,page
,&block
,&mapped_block
);
4821 //MSG(INIT,"[READ] %d, %d, %d %d\n",mapped_block, block, page_in_block, page_per_block);
4823 bRet
= mtk_nand_exec_read_page(mtd
, page_in_block
+ mapped_block
* page_per_block
, mtd
->writesize
, buf
, chip
->oob_poi
);
4824 if (bRet
== ERR_RTN_SUCCESS
)
4826 #if CFG_PERFLOG_DEBUG
4827 do_gettimeofday(&etimer
);
4828 g_NandPerfLog
.ReadPageTotalTime
+= Cal_timediff(&etimer
,&stimer
);
4829 g_NandPerfLog
.ReadPageCount
++;
4830 dump_nand_rwcount();
4840 static int mtk_nand_read_subpage(struct mtd_info
*mtd
, struct nand_chip
*chip
, u8
* buf
, int page
, int subpage
, int subpageno
)
4842 // int block_size = 1 << (chip->phys_erase_shift);
4843 int page_per_block
= 1 << (chip
->phys_erase_shift
- chip
->page_shift
);
4844 // int page_per_block1 = page_per_block;
4849 // bool readRetry = FALSE;
4850 // int retryCount = 0;
4851 int bRet
= ERR_RTN_SUCCESS
;
4852 int sec_num
= 1<<(chip
->page_shift
-host
->hw
->nand_sec_shift
);
4853 int spare_per_sector
= mtd
->oobsize
/sec_num
;
4854 #if CFG_PERFLOG_DEBUG
4855 struct timeval stimer
,etimer
;
4856 do_gettimeofday(&stimer
);
4858 page_in_block
= mtk_nand_page_transform(mtd
,chip
,page
,&block
,&mapped_block
);
4859 coladdr
= subpage
*(devinfo
.sectorsize
+spare_per_sector
);
4860 //coladdr = subpage*(devinfo.sectorsize);
4861 //MSG(INIT,"[Read Subpage] %d, %d, %d %d\n",mapped_block, block, page_in_block, page_per_block);
4863 bRet
= mtk_nand_exec_read_sector(mtd
, page_in_block
+ mapped_block
* page_per_block
, coladdr
, devinfo
.sectorsize
*subpageno
, buf
, chip
->oob_poi
,subpageno
);
4864 //memset(bean_buffer, 0xFF, LPAGE);
4865 //bRet = mtk_nand_exec_read_page(mtd, page, mtd->writesize, bean_buffer, chip->oob_poi);
4866 if (bRet
== ERR_RTN_SUCCESS
)
4868 #if CFG_PERFLOG_DEBUG
4869 do_gettimeofday(&etimer
);
4870 g_NandPerfLog
.ReadSubPageTotalTime
+= Cal_timediff(&etimer
,&stimer
);
4871 g_NandPerfLog
.ReadSubPageCount
++;
4872 dump_nand_rwcount();
4876 //memcpy(buf, bean_buffer+coladdr, mtd->writesize);
4883 /******************************************************************************
4885 * Erase a block at a logical address
4887 *****************************************************************************/
4888 int mtk_nand_erase_hw(struct mtd_info
*mtd
, int page
)
4890 #ifdef PWR_LOSS_SPOH
4891 struct timeval pl_time_write
;
4892 suseconds_t duration
;
4896 struct nand_chip
*chip
= (struct nand_chip
*)mtd
->priv
;
4897 #ifdef _MTK_NAND_DUMMY_DRIVER_
4898 if (dummy_driver_debug
)
4900 unsigned long long time
= sched_clock();
4901 if (!((time
* 123 + 59) % 1024))
4903 printk(KERN_INFO
"[NAND_DUMMY_DRIVER] Simulate erase error at page: 0x%x\n", page
);
4904 return NAND_STATUS_FAIL
;
4911 page
= mtk_nand_cs_on(chip
, NFI_TRICKY_CS
, page
);
4914 PL_NAND_BEGIN(pl_time_write
);
4915 PL_TIME_RAND_ERASE(chip
, page
, time
);
4916 chip
->erase_cmd(mtd
, page
);
4917 PL_NAND_RESET(time
);
4918 result
=chip
->waitfunc(mtd
, chip
);
4919 PL_NAND_END(pl_time_write
, duration
);
4920 PL_TIME_ERASE(duration
);
4924 static int mtk_nand_erase(struct mtd_info
*mtd
, int page
)
4927 struct nand_chip
*chip
= mtd
->priv
;
4928 // int block_size = 1 << (chip->phys_erase_shift);
4929 int page_per_block
= 1 << (chip
->phys_erase_shift
- chip
->page_shift
);
4933 #if CFG_PERFLOG_DEBUG
4934 struct timeval stimer
,etimer
;
4935 do_gettimeofday(&stimer
);
4937 page_in_block
= mtk_nand_page_transform(mtd
,chip
,page
,&block
,&mapped_block
);
4938 //MSG(INIT, "[ERASE] 0x%x 0x%x\n", mapped_block, page);
4939 status
= mtk_nand_erase_hw(mtd
, page_in_block
+ page_per_block
* mapped_block
);
4941 if (status
& NAND_STATUS_FAIL
)
4943 if (update_bmt((u64
)((u64
)page_in_block
+ (u64
)mapped_block
* page_per_block
) << chip
->page_shift
, UPDATE_ERASE_FAIL
, NULL
, NULL
))
4945 MSG(INIT
, "Erase fail at block: 0x%x, update BMT success\n", mapped_block
);
4949 MSG(INIT
, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block
);
4950 return NAND_STATUS_FAIL
;
4953 #if CFG_PERFLOG_DEBUG
4954 do_gettimeofday(&etimer
);
4955 g_NandPerfLog
.EraseBlockTotalTime
+= Cal_timediff(&etimer
,&stimer
);
4956 g_NandPerfLog
.EraseBlockCount
++;
4957 dump_nand_rwcount();
4962 /******************************************************************************
4963 * mtk_nand_read_multi_page_cache
4966 * read multi page data using cache read
4969 * struct mtd_info *mtd, struct nand_chip *chip, int page, struct mtd_oob_ops *ops
4975 * only available for nand flash support cache read.
4976 * read main data only.
4978 *****************************************************************************/
4980 static int mtk_nand_read_multi_page_cache(struct mtd_info
*mtd
, struct nand_chip
*chip
, int page
, struct mtd_oob_ops
*ops
)
4984 struct mtd_ecc_stats stat
= mtd
->ecc_stats
;
4985 uint8_t *buf
= ops
->datbuf
;
4987 if (!mtk_nand_ready_for_read(chip
, page
, 0, true, buf
))
4992 mtk_nand_set_mode(CNFG_OP_CUST
);
4993 DRV_WriteReg32(NFI_CON_REG16
, 8 << CON_NFI_SEC_SHIFT
);
4995 if (len
> mtd
->writesize
) // remained more than one page
4997 if (!mtk_nand_set_command(0x31)) // todo: add cache read command
5001 if (!mtk_nand_set_command(0x3f)) // last page remained
5005 mtk_nand_status_ready(STA_NAND_BUSY
);
5007 #ifdef __INTERNAL_USE_AHB_MODE__
5008 //if (!mtk_nand_dma_read_data(buf, mtd->writesize))
5009 if (!mtk_nand_read_page_data(mtd
, buf
, mtd
->writesize
))
5012 if (!mtk_nand_mcu_read_data(buf
, mtd
->writesize
))
5016 // get ecc error info
5017 mtk_nand_check_bch_error(mtd
, buf
, 3, page
);
5021 len
-= mtd
->writesize
;
5022 buf
+= mtd
->writesize
;
5023 ops
->retlen
+= mtd
->writesize
;
5036 mtk_nand_stop_read();
5041 if (mtd
->ecc_stats
.failed
> stat
.failed
)
5043 printk(KERN_INFO
"ecc fail happened\n");
5047 return mtd
->ecc_stats
.corrected
- stat
.corrected
? -EUCLEAN
: 0;
5051 /******************************************************************************
5052 * mtk_nand_read_oob_raw
5058 * struct mtd_info *mtd, const uint8_t *buf, int addr, int len
5064 * this function read raw oob data out of flash, so need to re-organise
5065 * data format before using.
5066 * len should be times of 8, call this after nand_get_device.
5067 * Should notice, this function read data without ECC protection.
5069 *****************************************************************************/
5070 static int mtk_nand_read_oob_raw(struct mtd_info
*mtd
, uint8_t * buf
, int page_addr
, int len
)
5072 struct nand_chip
*chip
= (struct nand_chip
*)mtd
->priv
;
5076 u32 colnob
= 2, rawnob
= devinfo
.addr_cycle
- 2;
5079 int sec_num
= 1<<(chip
->page_shift
-host
->hw
->nand_sec_shift
);
5080 int spare_per_sector
= mtd
->oobsize
/sec_num
;
5081 u32 sector_size
= NAND_SECTOR_SIZE
;
5082 if(devinfo
.sectorsize
== 1024)
5085 if (len
> NAND_MAX_OOBSIZE
|| len
% OOB_AVAI_PER_SECTOR
|| !buf
)
5087 printk(KERN_WARNING
"[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__
, len
, buf
);
5090 if (len
> spare_per_sector
)
5094 if (!randomread
|| !(devinfo
.advancedmode
& RAMDOM_READ
))
5098 read_len
= min(len
, spare_per_sector
);
5099 col_addr
= sector_size
+ sector
* (sector_size
+ spare_per_sector
); // TODO: Fix this hard-code 16
5100 if (!mtk_nand_ready_for_read(chip
, page_addr
, col_addr
, sec_num
, false, NULL
))
5102 printk(KERN_WARNING
"mtk_nand_ready_for_read return failed\n");
5106 if (!mtk_nand_mcu_read_data(buf
+ spare_per_sector
* sector
, read_len
)) // TODO: and this 8
5108 printk(KERN_WARNING
"mtk_nand_mcu_read_data return failed\n");
5112 mtk_nand_stop_read();
5113 //dump_data(buf + 16 * sector,16);
5118 } else //should be 64
5120 col_addr
= sector_size
;
5121 if (chip
->options
& NAND_BUSWIDTH_16
)
5126 if (!mtk_nand_reset())
5131 mtk_nand_set_mode(0x6000);
5132 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_READ_EN
);
5133 DRV_WriteReg32(NFI_CON_REG16
, 4 << CON_NFI_SEC_SHIFT
);
5135 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
5136 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
5138 mtk_nand_set_autoformat(false);
5140 if (!mtk_nand_set_command(NAND_CMD_READ0
))
5144 //1 FIXED ME: For Any Kind of AddrCycle
5145 if (!mtk_nand_set_address(col_addr
, page_addr
, colnob
, rawnob
))
5150 if (!mtk_nand_set_command(NAND_CMD_READSTART
))
5154 if (!mtk_nand_status_ready(STA_NAND_BUSY
))
5159 read_len
= min(len
, spare_per_sector
);
5160 if (!mtk_nand_mcu_read_data(buf
+ spare_per_sector
* sector
, read_len
)) // TODO: and this 8
5162 printk(KERN_WARNING
"mtk_nand_mcu_read_data return failed first 16\n");
5168 mtk_nand_stop_read();
5171 read_len
= min(len
, spare_per_sector
);
5172 if (!mtk_nand_set_command(0x05))
5177 col_addr
= sector_size
+ sector
* (sector_size
+ 16); //:TODO_JP careful 16
5178 if (chip
->options
& NAND_BUSWIDTH_16
)
5182 DRV_WriteReg32(NFI_COLADDR_REG32
, col_addr
);
5183 DRV_WriteReg16(NFI_ADDRNOB_REG16
, 2);
5184 DRV_WriteReg32(NFI_CON_REG16
, 4 << CON_NFI_SEC_SHIFT
);
5186 if (!mtk_nand_status_ready(STA_ADDR_STATE
))
5191 if (!mtk_nand_set_command(0xE0))
5195 if (!mtk_nand_status_ready(STA_NAND_BUSY
))
5199 if (!mtk_nand_mcu_read_data(buf
+ spare_per_sector
* sector
, read_len
)) // TODO: and this 8
5201 printk(KERN_WARNING
"mtk_nand_mcu_read_data return failed first 16\n");
5205 mtk_nand_stop_read();
5209 //dump_data(&testbuf[16],16);
5210 //printk(KERN_ERR "\n");
5213 NFI_CLN_REG32(NFI_CON_REG16
, CON_NFI_BRD
);
5217 static int mtk_nand_write_oob_raw(struct mtd_info
*mtd
, const uint8_t * buf
, int page_addr
, int len
)
5219 struct nand_chip
*chip
= mtd
->priv
;
5224 // u32 colnob=2, rawnob=devinfo.addr_cycle-2;
5225 // int randomread =0;
5228 int sec_num
= 1<<(chip
->page_shift
-host
->hw
->nand_sec_shift
);
5229 int spare_per_sector
= mtd
->oobsize
/sec_num
;
5230 u32 sector_size
= NAND_SECTOR_SIZE
;
5231 if(devinfo
.sectorsize
== 1024)
5234 if (len
> NAND_MAX_OOBSIZE
|| len
% OOB_AVAI_PER_SECTOR
|| !buf
)
5236 printk(KERN_WARNING
"[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__
, len
, buf
);
5242 write_len
= min(len
, spare_per_sector
);
5243 col_addr
= sector
* (sector_size
+ spare_per_sector
) + sector_size
;
5244 if (!mtk_nand_ready_for_write(chip
, page_addr
, col_addr
, false, NULL
))
5249 if (!mtk_nand_mcu_write_data(mtd
, buf
+ sector
* spare_per_sector
, write_len
))
5254 (void)mtk_nand_check_RW_count(write_len
);
5255 NFI_CLN_REG32(NFI_CON_REG16
, CON_NFI_BWR
);
5256 (void)mtk_nand_set_command(NAND_CMD_PAGEPROG
);
5258 while (DRV_Reg32(NFI_STA_REG32
) & STA_NAND_BUSY
) ;
5260 status
= chip
->waitfunc(mtd
, chip
);
5261 if (status
& NAND_STATUS_FAIL
)
5263 printk(KERN_INFO
"status: %d\n", status
);
5274 static int mtk_nand_write_oob_hw(struct mtd_info
*mtd
, struct nand_chip
*chip
, int page
)
5276 // u8 *buf = chip->oob_poi;
5279 int sec_num
= 1<<(chip
->page_shift
-host
->hw
->nand_sec_shift
);
5280 int spare_per_sector
= mtd
->oobsize
/sec_num
;
5282 memcpy(local_oob_buf
, chip
->oob_poi
, mtd
->oobsize
);
5285 for (i
= 0; i
< chip
->ecc
.layout
->eccbytes
; i
++)
5287 iter
= (i
/ OOB_AVAI_PER_SECTOR
) * spare_per_sector
+ OOB_AVAI_PER_SECTOR
+ i
% OOB_AVAI_PER_SECTOR
;
5288 local_oob_buf
[iter
] = chip
->oob_poi
[chip
->ecc
.layout
->eccpos
[i
]];
5289 // chip->oob_poi[chip->ecc.layout->eccpos[i]] = local_oob_buf[iter];
5293 for (i
= 0; i
< sec_num
; i
++)
5295 memcpy(&local_oob_buf
[i
* spare_per_sector
], &chip
->oob_poi
[i
* OOB_AVAI_PER_SECTOR
], OOB_AVAI_PER_SECTOR
);
5298 return mtk_nand_write_oob_raw(mtd
, local_oob_buf
, page
, mtd
->oobsize
);
5301 static int mtk_nand_write_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
, int page
)
5303 // int block_size = 1 << (chip->phys_erase_shift);
5304 int page_per_block
= 1 << (chip
->phys_erase_shift
- chip
->page_shift
);
5305 // int page_per_block1 = page_per_block;
5310 //block = page / page_per_block1;
5311 //mapped_block = get_mapping_block_index(block);
5312 page_in_block
= mtk_nand_page_transform(mtd
,chip
,page
,&block
,&mapped_block
);
5314 if (mapped_block
!= block
)
5316 set_bad_index_to_oob(chip
->oob_poi
, block
);
5319 set_bad_index_to_oob(chip
->oob_poi
, FAKE_INDEX
);
5322 if (mtk_nand_write_oob_hw(mtd
, chip
, page_in_block
+ mapped_block
* page_per_block
/* page */ ))
5324 MSG(INIT
, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block
, page_in_block
);
5325 if (update_bmt((u64
)((u64
)page_in_block
+ (u64
)mapped_block
* page_per_block
) << chip
->page_shift
, UPDATE_WRITE_FAIL
, NULL
, chip
->oob_poi
))
5327 MSG(INIT
, "Update BMT success\n");
5331 MSG(INIT
, "Update BMT fail\n");
5339 int mtk_nand_block_markbad_hw(struct mtd_info
*mtd
, loff_t offset
)
5341 struct nand_chip
*chip
= mtd
->priv
;
5342 int block
= (int)(offset
>> chip
->phys_erase_shift
);
5343 int page
= block
* (1 << (chip
->phys_erase_shift
- chip
->page_shift
));
5347 memset(buf
, 0xFF, 8);
5350 ret
= mtk_nand_write_oob_raw(mtd
, buf
, page
, 8);
5354 static int mtk_nand_block_markbad(struct mtd_info
*mtd
, loff_t offset
)
5356 struct nand_chip
*chip
= mtd
->priv
;
5357 u32 block
= (u32
)(offset
>> chip
->phys_erase_shift
);
5358 int page
= block
* (1 << (chip
->phys_erase_shift
- chip
->page_shift
));
5362 nand_get_device(mtd
, FL_WRITING
);
5364 //mapped_block = get_mapping_block_index(block);
5365 page
= mtk_nand_page_transform(mtd
,chip
,page
,&block
,&mapped_block
);
5366 ret
= mtk_nand_block_markbad_hw(mtd
, mapped_block
<< chip
->phys_erase_shift
);
5368 nand_release_device(mtd
);
/*
 * mtk_nand_read_oob_hw - read the whole raw OOB of @page into chip->oob_poi
 * and rearrange it from the controller's physical spare layout
 * (FDM/ECC interleaved per sector) into the MTD logical layout.
 * NOTE(review): several source lines are elided in this view (opening brace,
 * declarations of `i`/`iter`/`local_oob_buf`, error-path braces/return,
 * timing #ifdef guards); restore from the original file.
 */
5373 int mtk_nand_read_oob_hw(struct mtd_info
*mtd
, struct nand_chip
*chip
, int page
)
/* sectors per page: page size / controller sector size */
5378 int sec_num
= 1<<(chip
->page_shift
-host
->hw
->nand_sec_shift
);
5379 int spare_per_sector
= mtd
->oobsize
/sec_num
;
5381 unsigned long long time1
, time2
;
5383 time1
= sched_clock();
5386 if (mtk_nand_read_oob_raw(mtd
, chip
->oob_poi
, page
, mtd
->oobsize
))
5388 // printk(KERN_ERR "[%s]mtk_nand_read_oob_raw return failed\n", __FUNCTION__);
5392 time2
= sched_clock() - time1
;
5396 printk(KERN_ERR
"[%s] time is %llu", __FUNCTION__
, time2
);
5400 // adjust to ecc physical layout to memory layout
5401 /*********************************************************/
5402 /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */
5403 /* 8B | 8B | 8B | 8B | 8B | 8B | 8B | 8B */
5404 /*********************************************************/
/* keep an untouched copy of the raw spare before reshuffling in place */
5406 memcpy(local_oob_buf
, chip
->oob_poi
, mtd
->oobsize
);
/* scatter ECC bytes to their eccpos[] slots in the logical layout */
5409 for (i
= 0; i
< chip
->ecc
.layout
->eccbytes
; i
++)
5411 iter
= (i
/ OOB_AVAI_PER_SECTOR
) * spare_per_sector
+ OOB_AVAI_PER_SECTOR
+ i
% OOB_AVAI_PER_SECTOR
;
5412 chip
->oob_poi
[chip
->ecc
.layout
->eccpos
[i
]] = local_oob_buf
[iter
];
/* compact the per-sector FDM (free) bytes to the front of oob_poi */
5416 for (i
= 0; i
< sec_num
; i
++)
5418 memcpy(&chip
->oob_poi
[i
* OOB_AVAI_PER_SECTOR
], &local_oob_buf
[i
* spare_per_sector
], OOB_AVAI_PER_SECTOR
);
/*
 * mtk_nand_read_oob - MTD ->ecc.read_oob hook.
 * Maps the logical page to a physical page (halving pages-per-block and using
 * the device PairPage table when mtd->erasesize differs from the raw block
 * size — presumably the MLC-as-SLC path; TODO confirm), reads the OOB via
 * mtk_nand_read_oob_hw(), then also reads page data into a scratch buffer.
 * NOTE(review): the active declarations of block_size/page_per_block/
 * page_per_block1/block/mapped_block/page_in_block and the else-branch braces
 * are in elided source lines (5425, 5428, 5432-5434, 5436-5445); as shown the
 * fragment is not self-contained.
 */
5424 static int mtk_nand_read_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
, int page
)
5426 // int block_size = 1 << (chip->phys_erase_shift);
5427 // int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
5429 // u16 page_in_block;
5430 // int mapped_block;
5431 //u8* buf = (u8*)kzalloc(mtd->writesize, GFP_KERNEL);
5433 //page = mtk_nand_page_transform(mtd,chip,page,&block,&mapped_block);
5435 if(block_size
!= mtd
->erasesize
)
5437 page_per_block1
= page_per_block
>>1;
5439 block
= page
/ page_per_block1
;
5440 mapped_block
= get_mapping_block_index(block
);
5441 if(block_size
!= mtd
->erasesize
)
5442 page_in_block
= devinfo
.feature_set
.PairPage
[page
% page_per_block1
];
5444 page_in_block
= page
% page_per_block1
;
5446 mtk_nand_read_oob_hw(mtd
, chip
, page_in_block
+ mapped_block
* page_per_block
);
5448 mtk_nand_read_page(mtd
,chip
,temp_buffer_16_align
,page
);
5452 return 0; // the return value is sndcmd
/*
 * mtk_nand_block_bad_hw - check the block containing @ofs for the factory/BMT
 * bad-block marker: read the first subpage of the block and inspect
 * chip->oob_poi[0]; any value other than 0xFF means bad.
 * Returns 0 for a good block (bad-path return is in an elided line).
 * NOTE(review): declaration of `ret`, the error/bad-path braces and returns
 * are elided in this view (lines 5456, 5460, 5475-5481, 5483-5489).
 */
5455 int mtk_nand_block_bad_hw(struct mtd_info
*mtd
, loff_t ofs
)
5457 struct nand_chip
*chip
= (struct nand_chip
*)mtd
->priv
;
5458 int page_addr
= (int)(ofs
>> chip
->page_shift
);
5459 u32 block
, mapped_block
;
5461 unsigned int page_per_block
= 1 << (chip
->phys_erase_shift
- chip
->page_shift
);
5463 //unsigned char oob_buf[128];
5464 //char* buf = (char*) kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
5466 //page_addr = mtk_nand_page_transform(mtd, chip, page_addr, &block, &mapped_block);
/* round down to the first page of the block (page_per_block is a power of two) */
5468 page_addr
&= ~(page_per_block
- 1);
5470 //ret = mtk_nand_read_page(mtd,chip,buf,(ofs >> chip->page_shift));
5471 memset(temp_buffer_16_align
,0xFF,LPAGE
);
/* read only the first sector — enough to fetch OOB byte 0 */
5472 ret
= mtk_nand_read_subpage(mtd
,chip
,temp_buffer_16_align
,(ofs
>> chip
->page_shift
),0, 1);
5473 page_addr
= mtk_nand_page_transform(mtd
, chip
, page_addr
, &block
, &mapped_block
);
5474 //ret = mtk_nand_exec_read_page(mtd, page_addr+mapped_block*page_per_block, mtd->writesize, buf, oob_buf);
5477 printk(KERN_WARNING
"mtk_nand_read_oob_raw return error %d\n",ret
);
/* non-0xFF first OOB byte is the bad-block marker */
5482 if (chip
->oob_poi
[0] != 0xff)
5484 printk(KERN_WARNING
"Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", block
*page_per_block
, chip
->oob_poi
[0]);
5490 return 0; // everything is OK, good block
/*
 * mtk_nand_block_bad - MTD ->block_bad hook.
 * Takes the chip lock, selects the die containing @ofs, runs the hardware
 * bad-block check, and — on some condition in an elided line, presumably when
 * the bad block is not yet remapped — records it in the BMT via update_bmt()
 * with UPDATE_UNMAPPED_BLOCK. @getchip is unused in the visible fragment.
 * NOTE(review): declarations of chipnr/ret/page_in_block/mapped_block, the
 * conditional around the BMT update, and the final return are in elided
 * source lines (5494-5496, 5499, 5501, 5503-5507, 5518-5520, 5523-5534, 5536+).
 */
5493 static int mtk_nand_block_bad(struct mtd_info
*mtd
, loff_t ofs
, int getchip
)
5497 struct nand_chip
*chip
= (struct nand_chip
*)mtd
->priv
;
5498 int block
= (int)(ofs
>> chip
->phys_erase_shift
);
5500 int page
= (int)(ofs
>> chip
->page_shift
);
5502 int page_per_block
= 1 << (chip
->phys_erase_shift
- chip
->page_shift
);
5508 chipnr
= (int)(ofs
>> chip
->chip_shift
);
5509 nand_get_device(mtd
, FL_READING
);
5510 /* Select the NAND device */
5511 chip
->select_chip(mtd
, chipnr
);
5513 //page = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
5514 // mapped_block = get_mapping_block_index(block);
5516 ret
= mtk_nand_block_bad_hw(mtd
, ofs
);
5517 page_in_block
= mtk_nand_page_transform(mtd
, chip
, page
, &block
, &mapped_block
);
5521 MSG(INIT
, "Unmapped bad block: 0x%x %d\n", mapped_block
,ret
);
/* record the newly-found bad block in the block mapping table */
5522 if (update_bmt((u64
)((u64
)page_in_block
+ (u64
)mapped_block
* page_per_block
)<<chip
->page_shift
, UPDATE_UNMAPPED_BLOCK
, NULL
, NULL
))
5524 MSG(INIT
, "Update BMT success\n");
5528 MSG(INIT
, "Update BMT fail\n");
5535 nand_release_device(mtd
);
5540 /******************************************************************************
5541 * mtk_nand_init_size
5544 * initialize the pagesize, oobsize, blocksize
5547 * struct mtd_info *mtd, struct nand_chip *this, u8 *id_data
5555 ******************************************************************************/
/*
 * mtk_nand_init_size - ->init_size hook: populate mtd geometry from the
 * detected device table (`devinfo`) instead of ONFI/ID-decoded values.
 * Returns NAND_BUSWIDTH_16 for 16-bit devices; the 8-bit return path is in
 * an elided source line. @this and @id_data are unused in the visible fragment.
 */
5557 static int mtk_nand_init_size(struct mtd_info
*mtd
, struct nand_chip
*this, u8
*id_data
)
5560 mtd
->writesize
= devinfo
.pagesize
;
5563 mtd
->oobsize
= devinfo
.sparesize
;
5565 /* Get blocksize. */
/* devinfo.blocksize is in KiB; convert to bytes */
5566 mtd
->erasesize
= devinfo
.blocksize
*1024;
5567 /* Get buswidth information */
5568 if(devinfo
.iowidth
==16)
5570 return NAND_BUSWIDTH_16
;
5579 /******************************************************************************
5580 * mtk_nand_verify_buf
5583 * Verify the NAND write data is correct or not !
5586 * struct mtd_info *mtd, const uint8_t *buf, int len
5594 ******************************************************************************/
5595 #ifdef CONFIG_MTD_NAND_VERIFY_WRITE
5597 char gacBuf
[LPAGE
+ LSPARE
];
/*
 * mtk_nand_verify_buf - ->verify_buf hook (CONFIG_MTD_NAND_VERIFY_WRITE).
 * Re-reads the last programmed page (g_kCMD.u4RowAddr) into the static
 * `gacBuf` (data + OOB) and compares it word-by-word against @buf and the
 * first 6 OOB words against chip->oob_poi, logging any mismatch.
 * NOTE(review): declarations of i/pSrc/pDst, comparison of data words against
 * @buf, and the return statements are in elided source lines
 * (5600-5601, 5605-5607, 5609-5610, 5614-5623, 5634-5636, 5640-5654).
 */
5599 static int mtk_nand_verify_buf(struct mtd_info
*mtd
, const uint8_t * buf
, int len
)
5602 struct nand_chip
*chip
= (struct nand_chip
*)mtd
->priv
;
5603 struct NAND_CMD
*pkCMD
= &g_kCMD
;
5604 u32 u4PageSize
= mtd
->writesize
;
/* read back data into gacBuf[0..pagesize) and OOB into gacBuf[pagesize..] */
5608 mtk_nand_exec_read_page(mtd
, pkCMD
->u4RowAddr
, u4PageSize
, gacBuf
, gacBuf
+ u4PageSize
);
5611 pDst
= (u32
*) gacBuf
;
/* compare in 32-bit words */
5612 len
= len
/ sizeof(u32
);
5613 for (i
= 0; i
< len
; ++i
)
5617 MSG(VERIFY
, "mtk_nand_verify_buf page fail at page %d\n", pkCMD
->u4RowAddr
);
5624 pSrc
= (u32
*) chip
->oob_poi
;
5625 pDst
= (u32
*) (gacBuf
+ u4PageSize
);
/* only the first 6 OOB words are compared (words 6-7 deliberately skipped) */
5627 if ((pSrc
[0] != pDst
[0]) || (pSrc
[1] != pDst
[1]) || (pSrc
[2] != pDst
[2]) || (pSrc
[3] != pDst
[3]) || (pSrc
[4] != pDst
[4]) || (pSrc
[5] != pDst
[5]))
5628 // TODO: Ask Designer Why?
5629 //(pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7]))
5631 MSG(VERIFY
, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD
->u4RowAddr
);
5632 MSG(VERIFY
, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc
[0], pSrc
[1], pSrc
[2], pSrc
[3], pSrc
[4], pSrc
[5], pSrc
[6], pSrc
[7]);
5633 MSG(VERIFY
, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst
[0], pDst
[1], pDst
[2], pDst
[3], pDst
[4], pDst
[5], pDst
[6], pDst
[7]);
5637 for (i = 0; i < len; ++i) {
5638 if (*pSrc != *pDst) {
5639 printk(KERN_ERR"mtk_nand_verify_buf oob fail at page %d\n", g_kCMD.u4RowAddr);
5646 //printk(KERN_INFO"mtk_nand_verify_buf OK at page %d\n", g_kCMD.u4RowAddr);
5655 /******************************************************************************
5659 * Initial NAND device hardware component !
5662 * struct mtk_nand_host *host (Initial setting data)
5670 ******************************************************************************/
/*
 * mtk_nand_init_hw - one-time NFI controller bring-up: program access timing,
 * reset config/page-format registers, reset the NFI state machine and FIFO,
 * enable the HW ECC engine (when configured), clear/disable interrupts and
 * enable the ECC-clock auto-gating workaround.
 * NOTE(review): opening/closing braces and some #ifdef guard lines are elided
 * in this view.
 */
5671 static void mtk_nand_init_hw(struct mtk_nand_host
*host
)
5673 struct mtk_nand_host_hw
*hw
= host
->hw
;
5676 g_bInitDone
= false;
/* invalidate the cached OOB row address */
5677 g_kCMD
.u4OOBRowAddr
= (u32
) - 1;
5679 /* Set default NFI access timing control */
5680 DRV_WriteReg32(NFI_ACCCON_REG32
, hw
->nfi_access_timing
);
5681 DRV_WriteReg16(NFI_CNFG_REG16
, 0);
5682 DRV_WriteReg16(NFI_PAGEFMT_REG16
, 4);
5683 DRV_WriteReg32(NFI_ENMPTY_THRESH_REG32
, 40);
5685 /* Reset the state machine and data FIFO, because flushing FIFO */
5686 (void)mtk_nand_reset();
5688 /* Set the ECC engine */
5689 if (hw
->nand_ecc_mode
== NAND_ECC_HW
)
5691 MSG(INIT
, "%s : Use HW ECC\n", MODULE_NAME
);
5694 NFI_SET_REG32(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
5696 ECC_Config(host
->hw
,4);
/* 8 FDM (free-data) bytes per sector */
5697 mtk_nand_configure_fdm(8);
5700 /* Initilize interrupt. Clear interrupt, read clear. */
5701 DRV_Reg16(NFI_INTR_REG16
);
5703 /* Interrupt arise when read data or program data to/from AHB is done. */
5704 DRV_WriteReg16(NFI_INTR_EN_REG16
, 0);
5706 // Enable automatic disable ECC clock when NFI is busy state
5707 DRV_WriteReg16(NFI_DEBUG_CON1_REG16
, (NFI_BYPASS
|WBUF_EN
|HWDCM_SWCON_ON
));
5710 host
->saved_para
.suspend_flag
= 0;
5715 //-------------------------------------------------------------------------------
/*
 * mtk_nand_dev_ready - ->dev_ready hook: nonzero when the NFI status register
 * no longer reports the NAND-busy bit. (Braces elided in this view.)
 */
5716 static int mtk_nand_dev_ready(struct mtd_info
*mtd
)
5718 return !(DRV_Reg32(NFI_STA_REG32
) & STA_NAND_BUSY
);
5721 /******************************************************************************
5722 * mtk_nand_proc_read
5725 * Read the proc file to get the interrupt scheme setting !
5728 * char *page, char **start, off_t off, int count, int *eof, void *data
5736 ******************************************************************************/
/*
 * mtk_nand_proc_read - /proc/driver/nand read handler.
 * sprintf()s a human-readable report into a buffer via cursor `p`:
 * flash ID bytes, size/part number, interrupt-vs-polling mode, NFI timing and
 * pad-driving registers, and (under CFG_PERFLOG_DEBUG) read/write/erase
 * performance counters with computed averages.
 * NOTE(review): declarations of `p`/`i`/`len`, the output buffer, the #else
 * lines of the FPGA #if blocks, and the copy-to-user plumbing are in elided
 * source lines; `len` is presumably p - buffer_start — confirm in original.
 */
5737 int mtk_nand_proc_read(struct file
*file
, char *buffer
, size_t count
, loff_t
*ppos
)
5742 p
+= sprintf(p
, "ID:");
5743 for(i
=0;i
<devinfo
.id_length
;i
++){
5744 p
+= sprintf(p
, " 0x%x", devinfo
.id
[i
]);
5746 p
+= sprintf(p
, "\n");
5747 p
+= sprintf(p
, "total size: %dMiB; part number: %s\n", devinfo
.totalsize
,devinfo
.devciename
);
5748 p
+= sprintf(p
, "Current working in %s mode\n", g_i4Interrupt
? "interrupt" : "polling");
5749 p
+= sprintf(p
, "NFI_ACCON(0x%x)=0x%x\n",(NFI_BASE
+0x000C),DRV_Reg32(NFI_ACCCON_REG32
));
5750 p
+= sprintf(p
, "NFI_NAND_TYPE_CNFG_REG32= 0x%x\n",DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32
));
5751 #if CFG_FPGA_PLATFORM
5752 p
+= sprintf(p
, "[FPGA Dummy]DRV_CFG_NFIA(0x0)=0x0\n");
5753 p
+= sprintf(p
, "[FPGA Dummy]DRV_CFG_NFIB(0x0)=0x0\n");
5755 p
+= sprintf(p
, "DRV_CFG_NFIA(IO PAD:0x%x)=0x%x\n",(GPIO_BASE
+0xC20),*((volatile u32
*)(GPIO_BASE
+0xC20)));
5756 p
+= sprintf(p
, "DRV_CFG_NFIB(CTRL PAD:0x%x)=0x%x\n",(GPIO_BASE
+0xB50),*((volatile u32
*)(GPIO_BASE
+0xB50)));
5758 #if CFG_PERFLOG_DEBUG
5759 p
+= sprintf(p
, "Read Page Count:%d, Read Page totalTime:%lu, Avg. RPage:%lu\r\n",
5760 g_NandPerfLog
.ReadPageCount
,g_NandPerfLog
.ReadPageTotalTime
,
5761 g_NandPerfLog
.ReadPageCount
? (g_NandPerfLog
.ReadPageTotalTime
/g_NandPerfLog
.ReadPageCount
): 0);
5763 p
+= sprintf(p
, "Read subPage Count:%d, Read subPage totalTime:%lu, Avg. RPage:%lu\r\n",
5764 g_NandPerfLog
.ReadSubPageCount
,g_NandPerfLog
.ReadSubPageTotalTime
,
5765 g_NandPerfLog
.ReadSubPageCount
? (g_NandPerfLog
.ReadSubPageTotalTime
/g_NandPerfLog
.ReadSubPageCount
): 0);
5767 p
+= sprintf(p
, "Read Busy Count:%d, Read Busy totalTime:%lu, Avg. R Busy:%lu\r\n",
5768 g_NandPerfLog
.ReadBusyCount
,g_NandPerfLog
.ReadBusyTotalTime
,
5769 g_NandPerfLog
.ReadBusyCount
? (g_NandPerfLog
.ReadBusyTotalTime
/g_NandPerfLog
.ReadBusyCount
): 0);
5771 p
+= sprintf(p
, "Read DMA Count:%d, Read DMA totalTime:%lu, Avg. R DMA:%lu\r\n",
5772 g_NandPerfLog
.ReadDMACount
,g_NandPerfLog
.ReadDMATotalTime
,
5773 g_NandPerfLog
.ReadDMACount
? (g_NandPerfLog
.ReadDMATotalTime
/g_NandPerfLog
.ReadDMACount
): 0);
5775 p
+= sprintf(p
, "Write Page Count:%d, Write Page totalTime:%lu, Avg. WPage:%lu\r\n",
5776 g_NandPerfLog
.WritePageCount
,g_NandPerfLog
.WritePageTotalTime
,
5777 g_NandPerfLog
.WritePageCount
? (g_NandPerfLog
.WritePageTotalTime
/g_NandPerfLog
.WritePageCount
): 0);
5779 p
+= sprintf(p
, "Write Busy Count:%d, Write Busy totalTime:%lu, Avg. W Busy:%lu\r\n",
5780 g_NandPerfLog
.WriteBusyCount
,g_NandPerfLog
.WriteBusyTotalTime
,
5781 g_NandPerfLog
.WriteBusyCount
? (g_NandPerfLog
.WriteBusyTotalTime
/g_NandPerfLog
.WriteBusyCount
): 0);
5783 p
+= sprintf(p
, "Write DMA Count:%d, Write DMA totalTime:%lu, Avg. W DMA:%lu\r\n",
5784 g_NandPerfLog
.WriteDMACount
,g_NandPerfLog
.WriteDMATotalTime
,
5785 g_NandPerfLog
.WriteDMACount
? (g_NandPerfLog
.WriteDMATotalTime
/g_NandPerfLog
.WriteDMACount
): 0);
5787 p
+= sprintf(p
, "EraseBlock Count:%d, EraseBlock totalTime:%lu, Avg. Erase:%lu\r\n",
5788 g_NandPerfLog
.EraseBlockCount
,g_NandPerfLog
.EraseBlockTotalTime
,
5789 g_NandPerfLog
.EraseBlockCount
? (g_NandPerfLog
.EraseBlockTotalTime
/g_NandPerfLog
.EraseBlockCount
): 0);
/* proc convention: return at most `count` bytes */
5794 return len
< count
? len
: count
;
5797 /******************************************************************************
5798 * mtk_nand_proc_write
5801 * Write the proc file to set the interrupt scheme !
5804 * struct file* file, const char* buffer, unsigned long count, void *data
5812 ******************************************************************************/
/*
 * mtk_nand_proc_write - /proc/driver/nand write handler; parses "<cmd><hex>"
 * and dispatches (switch statement's braces/default are in elided lines):
 *   'A'/'B' - set NFIA (IO) / NFIB (control) pad driving strength (0..7)
 *   'D'     - (under _MTK_NAND_DUMMY_DRIVER_) enable dummy-driver debug
 *   'I'     - switch between interrupt and polling mode (takes chip lock)
 *   'P'     - reset performance-monitor counters
 *   'R'     - zero the CFG_PERFLOG_DEBUG counters
 *   'T'     - program NFI_ACCCON access timing
 * NOTE(review): declarations of `buf`/`cmd`/`value`/`ret`, switch/case braces,
 * #else arms and the final return are in elided source lines.
 */
5813 int mtk_nand_proc_write(struct file
*file
, const char *buffer
, unsigned long count
, void *data
)
5815 struct mtd_info
*mtd
= &host
->mtd
;
5819 int len
= count
;//, n;
/* clamp so the terminating NUL always fits in the local buffer */
5821 if (len
>= sizeof(buf
))
5823 len
= sizeof(buf
) - 1;
5826 if (copy_from_user(buf
, buffer
, len
))
5831 sscanf(buf
, "%c%x",&cmd
, &value
);
5835 case 'A': // NFIA driving setting
5836 #if CFG_FPGA_PLATFORM
5837 printk(KERN_INFO
"[FPGA Dummy]NFIA driving setting\n");
5839 if ((value
>= 0x0) && (value
<= 0x7)) // driving step
5841 printk(KERN_INFO
"[NAND]IO PAD driving setting value(0x%x)\n\n", value
);
5842 *((volatile u32
*)(GPIO_BASE
+0xC20)) = value
; //pad 7 6 4 3 0 1 5 8 2
5845 printk(KERN_ERR
"[NAND]IO PAD driving setting value(0x%x) error\n", value
);
5848 case 'B': // NFIB driving setting
5849 #if CFG_FPGA_PLATFORM
5850 printk(KERN_INFO
"[FPGA Dummy]NFIB driving setting\n");
5852 if ((value
>= 0x0) && (value
<= 0x7)) // driving step
5854 printk(KERN_INFO
"[NAND]Ctrl PAD driving setting value(0x%x)\n\n", value
);
5855 *((volatile u32
*)(GPIO_BASE
+0xB50)) = value
; //CLE CE1 CE0 RE RB
5856 *((volatile u32
*)(GPIO_BASE
+0xC10)) = value
; //ALE
5857 *((volatile u32
*)(GPIO_BASE
+0xC00)) = value
; //WE
5860 printk(KERN_ERR
"[NAND]Ctrl PAD driving setting value(0x%x) error\n", value
);
5864 #ifdef _MTK_NAND_DUMMY_DRIVER_
5865 printk(KERN_INFO
"Enable dummy driver\n");
5866 dummy_driver_debug
= 1;
5869 case 'I': // Interrupt control
/* only act on an actual mode change */
5870 if ((value
> 0 && !g_i4Interrupt
) || (value
== 0 && g_i4Interrupt
))
5872 nand_get_device(mtd
, FL_READING
);
5874 g_i4Interrupt
= value
;
/* read-clear any pending interrupt before enabling the IRQ line */
5878 DRV_Reg16(NFI_INTR_REG16
);
5879 enable_irq(MT_NFI_IRQ_ID
);
5881 disable_irq(MT_NFI_IRQ_ID
);
5883 nand_release_device(mtd
);
5886 case 'P': // Reset Performance monitor counter
5894 g_kCMD
.pureReadOOBNum
= 0;
5897 case 'R': // Reset NFI performance log
5898 #if CFG_PERFLOG_DEBUG
5899 g_NandPerfLog
.ReadPageCount
= 0;
5900 g_NandPerfLog
.ReadPageTotalTime
= 0;
5901 g_NandPerfLog
.ReadBusyCount
= 0;
5902 g_NandPerfLog
.ReadBusyTotalTime
= 0;
5903 g_NandPerfLog
.ReadDMACount
= 0;
5904 g_NandPerfLog
.ReadDMATotalTime
= 0;
5905 g_NandPerfLog
.ReadSubPageCount
= 0;
5906 g_NandPerfLog
.ReadSubPageTotalTime
= 0;
5908 g_NandPerfLog
.WritePageCount
= 0;
5909 g_NandPerfLog
.WritePageTotalTime
= 0;
5910 g_NandPerfLog
.WriteBusyCount
= 0;
5911 g_NandPerfLog
.WriteBusyTotalTime
= 0;
5912 g_NandPerfLog
.WriteDMACount
= 0;
5913 g_NandPerfLog
.WriteDMATotalTime
= 0;
5915 g_NandPerfLog
.EraseBlockCount
= 0;
5916 g_NandPerfLog
.EraseBlockTotalTime
= 0;
5919 case 'T': // ACCCON Setting
5920 nand_get_device(mtd
, FL_READING
);
5921 DRV_WriteReg32(NFI_ACCCON_REG32
,value
);
5922 nand_release_device(mtd
);
5931 #define EFUSE_GPIO_CFG ((volatile u32 *)(0xF02061c0))
5932 #define EFUSE_GPIO_1_8_ENABLE 0x00000008
5933 static unsigned short NFI_gpio_uffs(unsigned short x
)
/*
 * NFI_GPIO_SET_FIELD - read-modify-write a bit field of a 16-bit GPIO register.
 * Shifts @val to the position of the lowest set bit of @field (via
 * NFI_gpio_uffs) and ORs it in.
 * NOTE(review): the line that clears the field before OR-ing (presumably
 * `tv &= ~field;` at elided line 5966) is missing from this view — confirm in
 * the original, otherwise stale field bits would persist.
 */
5963 static void NFI_GPIO_SET_FIELD(U32 reg
, U32 field
, U32 val
)
5965 unsigned short tv
= (unsigned short)(*(volatile u16
*)(reg
));
5967 tv
|= ((val
) << (NFI_gpio_uffs((unsigned short)(field
)) - 1));
5968 (*(volatile u16
*)(reg
) = (u16
)(tv
));
/*
 * mtk_nand_gpio_init - pinmux and pad configuration for the NFI interface:
 * set pull-up/down fields, route the NAND-function GPIO modes, select
 * TDSEL/RDSEL values depending on the EFUSE 1.8V/3.3V IO setting, and raise
 * pad driving strength. Register offsets/values are board-specific magic from
 * the platform datasheet.
 * NOTE(review): braces of the if/else around the voltage branch and some
 * surrounding lines are elided in this view.
 */
5971 static void mtk_nand_gpio_init(void)
5973 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xc00, 0x700, 0x2); //pullup with 50Kohm ----PAD_MSDC0_CLK for 1.8v/3.3v
5974 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xc10, 0x700, 0x3); //pulldown with 50Kohm ----PAD_MSDC0_CMD for 1.8v/3.3v
5975 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xc30, 0x70, 0x3); //pulldown with 50Kohm ----PAD_MSDC0_DAT1 for 1.8v/3.3v
5976 mt_set_gpio_mode(GPIO46
, GPIO_MODE_06
);
5977 mt_set_gpio_mode(GPIO47
, GPIO_MODE_06
);
5978 mt_set_gpio_mode(GPIO48
, GPIO_MODE_06
);
5979 mt_set_gpio_mode(GPIO49
, GPIO_MODE_06
);
5980 mt_set_gpio_mode(GPIO127
, GPIO_MODE_04
);
5981 mt_set_gpio_mode(GPIO128
, GPIO_MODE_04
);
5982 mt_set_gpio_mode(GPIO129
, GPIO_MODE_04
);
5983 mt_set_gpio_mode(GPIO130
, GPIO_MODE_04
);
5984 mt_set_gpio_mode(GPIO131
, GPIO_MODE_04
);
5985 mt_set_gpio_mode(GPIO132
, GPIO_MODE_04
);
5986 mt_set_gpio_mode(GPIO133
, GPIO_MODE_04
);
5987 mt_set_gpio_mode(GPIO134
, GPIO_MODE_04
);
5988 mt_set_gpio_mode(GPIO135
, GPIO_MODE_04
);
5989 mt_set_gpio_mode(GPIO136
, GPIO_MODE_04
);
5990 mt_set_gpio_mode(GPIO137
, GPIO_MODE_05
);
5991 mt_set_gpio_mode(GPIO142
, GPIO_MODE_01
);
5993 mt_set_gpio_pull_enable(GPIO142
, 1);
5994 mt_set_gpio_pull_select(GPIO142
, 1);
/* EFUSE bit clear => pads run at 3.3V; otherwise the 1.8V settings apply */
5996 if(!( (*EFUSE_GPIO_CFG
)&EFUSE_GPIO_1_8_ENABLE
)) //3.3v
5999 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xd70, 0xf, 0x0a); /* TDSEL change value to 0x0a*/
6000 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xd70, 0x3f0, 0x0c); /* RDSEL change value to 0x0c*/
6002 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xc60, 0xf, 0x0a); /* TDSEL change value to 0x0a*/
6003 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xc60, 0x3f0, 0x0c); /* RDSEL change value to 0x0c*/
6008 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xd70, 0xf, 0x0a); /* TDSEL change value to 0x0a*/
6009 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xd70, 0x3f0, 0x00); /* RDSEL change value to 0x0c*/
6011 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xc60, 0xf, 0x0a); /* TDSEL change value to 0x0a*/
6012 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xc60, 0x3f0, 0x00); /* RDSEL change value to 0x0c*/
6014 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xc00, 0x7, 0x3); //set CLK driving more than 4mA default:0x3
6015 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xc10, 0x7, 0x3); //set CMD driving more than 4mA
6016 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xc20, 0x7, 0x3); //set DAT driving more than 4mA
6017 NFI_GPIO_SET_FIELD(GPIO_BASE
+0xb50, 0x7, 0x3); //set NFI_PAD driving more than 4mA
6018 DRV_WriteReg32(GPIO_BASE
+0xe20, DRV_Reg32(GPIO_BASE
+0xe20) | 0x5 | (0x5 << 12));//NFI_BIAS_CTRL, temp solution
6019 //DRV_WriteReg32(GPIO_BASE+0x180, 0x7FFF);
6020 //DRV_WriteReg32(GPIO_BASE+0x280, 0x7FDF);
6024 /******************************************************************************
6028 * register the nand device file operations !
6031 * struct platform_device *pdev : device structure
6039 ******************************************************************************/
6040 #define KERNEL_NAND_UNIT_TEST 0
6041 #define NAND_READ_PERFORMANCE 0
6042 #if KERNEL_NAND_UNIT_TEST
/*
 * mtk_nand_unit_test - built-in self-test (compiled under KERNEL_NAND_UNIT_TEST).
 * For each block index j in [0x400, 0x7A0): read the first page, erase the
 * block, re-read (expect empty), skip bad blocks, program every page with a
 * 512-byte fixed pattern, read each page back and memcmp against the pattern.
 * Afterwards exercises Set/GetFeature (async timing) and the interface
 * config path.
 * NOTE(review): declarations of `err` and loop/brace lines are elided in this
 * view; `local_buffer` size assumed >= 8192 — confirm in original.
 */
6043 int mtk_nand_unit_test(struct nand_chip
*nand_chip
, struct mtd_info
*mtd
)
6045 MSG(INIT
, "Begin to Kernel nand unit test ... \n");
/* fixed 128-word (512-byte) test pattern, second half byte-swapped mirror of first */
6047 int patternbuff
[128] = {
6048 0x0103D901, 0xFF1802DF, 0x01200400, 0x00000021, 0x02040122, 0x02010122, 0x03020407, 0x1A050103,
6049 0x00020F1B, 0x08C0C0A1, 0x01550800, 0x201B0AC1, 0x41990155, 0x64F0FFFF, 0x201B0C82, 0x4118EA61,
6050 0xF00107F6, 0x0301EE1B, 0x0C834118, 0xEA617001, 0x07760301, 0xEE151405, 0x00202020, 0x20202020,
6051 0x00202020, 0x2000302E, 0x3000FF14, 0x00FF0000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6052 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6053 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6054 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6055 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6056 0x01D90301, 0xDF0218FF, 0x00042001, 0x21000000, 0x22010402, 0x22010102, 0x07040203, 0x0301051A,
6057 0x1B0F0200, 0xA1C0C008, 0x00085501, 0xC10A1B20, 0x55019941, 0xFFFFF064, 0x820C1B20, 0x61EA1841,
6058 0xF60701F0, 0x1BEE0103, 0x1841830C, 0x017061EA, 0x01037607, 0x051415EE, 0x20202000, 0x20202020,
6059 0x20202000, 0x2E300020, 0x14FF0030, 0x0000FF00, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6060 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6061 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6062 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6063 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
/* p = pages per block */
6065 u32 j
, k
, p
= g_block_size
/g_page_size
;
6066 printk("[P] %x\n", p
);
6067 struct gFeatureSet
*feature_set
= &(devinfo
.feature_set
.FeatureSet
);
6068 u32 val
= 0x05, TOTAL
=1000;
6069 for (j
= 0x400; j
< 0x7A0; j
++)
6071 memset(local_buffer
, 0x00, 8192);
6072 mtk_nand_read_page(mtd
, nand_chip
, local_buffer
, j
*p
);
6073 MSG(INIT
,"[1]0x%x %x %x %x\n", *(int *)local_buffer
, *((int *)local_buffer
+1), *((int *)local_buffer
+2), *((int *)local_buffer
+3));
6074 mtk_nand_erase(mtd
, j
*p
);
6075 memset(local_buffer
, 0x00, 8192);
6076 if(mtk_nand_read_page(mtd
, nand_chip
, local_buffer
, j
*p
))
6077 printk("Read page 0x%x fail!\n", j
*p
);
6078 MSG(INIT
,"[2]0x%x %x %x %x\n", *(int *)local_buffer
, *((int *)local_buffer
+1), *((int *)local_buffer
+2), *((int *)local_buffer
+3));
6079 if (mtk_nand_block_bad(mtd
, j
*g_block_size
, 0))
6081 printk("Bad block at %x\n", j
);
6084 for (k
= 0; k
< p
; k
++)
6086 if(mtk_nand_write_page(mtd
, nand_chip
,(u8
*)patternbuff
, j
*p
+k
, 0, 0))
6087 printk("Write page 0x%x fail!\n", j
*p
+k
);
6092 for (k
= 0; k
< p
; k
++)
6095 memset(local_buffer
, 0x00, g_page_size
);
6096 if(mtk_nand_read_page(mtd
, nand_chip
, local_buffer
, j
*p
+k
))
6097 printk("Read page 0x%x fail!\n", j
*p
+k
);
6098 MSG(INIT
,"[3]0x%x %x %x %x\n", *(int *)local_buffer
, *((int *)local_buffer
+1), *((int *)local_buffer
+2), *((int *)local_buffer
+3));
6099 if(memcmp((u8
*)patternbuff
, local_buffer
, 128*4))
6101 MSG(INIT
, "[KERNEL_NAND_UNIT_TEST] compare fail!\n");
6107 MSG(INIT
, "[KERNEL_NAND_UNIT_TEST] compare OK!\n");
6112 mtk_nand_SetFeature(mtd
, (u16
) feature_set
->sfeatureCmd
, \
6113 feature_set
->Async_timing
.address
, (u8
*)&val
,\
6114 sizeof(feature_set
->Async_timing
.feature
));
6115 mtk_nand_GetFeature(mtd
, feature_set
->gfeatureCmd
, \
6116 feature_set
->Async_timing
.address
, (u8
*)&val
,4);
6117 printk("[ASYNC Interface]0x%X\n", val
);
6118 err
= mtk_nand_interface_config(mtd
);
6119 MSG(INIT
, "[nand_interface_config] %d\n",err
);
6127 //#define CHIP_ADDRESS (0x100000)
/*
 * mtk_nand_cs_check - probe chip-select @cs for a second ("twins") die:
 * select @cs, issue READID, read NAND_MAX_ID bytes and (in an elided line)
 * compare against the primary die's @id. On mismatch logs, restores the
 * default CS and returns failure; success/return paths are also elided.
 * NOTE(review): declaration of `i`, the comparison loop body and returns are
 * in elided source lines (6129, 6131, 6141, 6143-6144, 6147-6153).
 */
6128 static int mtk_nand_cs_check(struct mtd_info
*mtd
, u8
*id
, u16 cs
)
6130 u8 ids
[NAND_MAX_ID
];
6132 //if(devinfo.ttarget == TTYPE_2DIE)
6134 // MSG(INIT,"2 Die Flash\n");
6135 // g_bTricky_CS = TRUE;
/* temporarily steer NFI at the candidate chip select */
6138 DRV_WriteReg16(NFI_CSEL_REG16
, cs
);
6139 mtk_nand_command_bp(mtd
, NAND_CMD_READID
, 0, -1);
6140 for(i
=0;i
<NAND_MAX_ID
;i
++)
6142 ids
[i
]=mtk_nand_read_byte(mtd
);
6145 MSG(INIT
, "Nand cs[%d] not support(%d,%x)\n", cs
, i
, ids
[i
]);
6146 DRV_WriteReg16(NFI_CSEL_REG16
, NFI_DEFAULT_CS
);
6151 DRV_WriteReg16(NFI_CSEL_REG16
, NFI_DEFAULT_CS
);
/*
 * mtk_nand_cs_on - for pages beyond the first die, switch the NFI chip select
 * to @cs and return the page index rebased onto that die; otherwise (elided
 * branch) restore the default CS and return @page unchanged.
 * NOTE(review): the `if (page >= g_nanddie_pages)`-style condition and
 * surrounding braces/returns are in elided source lines (6156, 6158-6159,
 * 6164, 6166+).
 */
6155 static u32
mtk_nand_cs_on(struct nand_chip
*nand_chip
, u16 cs
, u32 page
)
6157 u32 cs_page
= page
/ g_nanddie_pages
;
6160 DRV_WriteReg16(NFI_CSEL_REG16
, cs
);
6161 //if(devinfo.ttarget == TTYPE_2DIE)
6162 // return page;//return (page | CHIP_ADDRESS);
/* rebase page index onto the second die */
6163 return (page
- g_nanddie_pages
);
6165 DRV_WriteReg16(NFI_CSEL_REG16
, NFI_DEFAULT_CS
);
6171 #define mtk_nand_cs_check(mtd, id, cs) (1)
6172 #define mtk_nand_cs_on(nand_chip, cs, page) (page)
6175 static int mtk_nand_probe(struct platform_device
*pdev
)
6178 struct mtk_nand_host_hw
*hw
;
6179 struct mtd_info
*mtd
;
6180 struct nand_chip
*nand_chip
;
6181 struct resource
*res
= pdev
->resource
;
6185 u32 sector_size
= NAND_SECTOR_SIZE
;
6190 #ifdef MTK_PMIC_MT6397
6191 hwPowerOn(MT65XX_POWER_LDO_VMCH
, VOL_3300
, "NFI");
6193 hwPowerOn(MT6323_POWER_LDO_VMCH
, VOL_3300
, "NFI");
6196 hw
= (struct mtk_nand_host_hw
*)pdev
->dev
.platform_data
;
6199 if (pdev
->num_resources
!= 4 || res
[0].flags
!= IORESOURCE_MEM
|| res
[1].flags
!= IORESOURCE_MEM
|| res
[2].flags
!= IORESOURCE_IRQ
|| res
[3].flags
!= IORESOURCE_IRQ
)
6201 MSG(INIT
, "%s: invalid resource type\n", __FUNCTION__
);
6205 /* Request IO memory */
6206 if (!request_mem_region(res
[0].start
, res
[0].end
- res
[0].start
+ 1, pdev
->name
))
6210 if (!request_mem_region(res
[1].start
, res
[1].end
- res
[1].start
+ 1, pdev
->name
))
6215 /* Allocate memory for the device structure (and zero it) */
6216 host
= kzalloc(sizeof(struct mtk_nand_host
), GFP_KERNEL
);
6219 MSG(INIT
, "mtk_nand: failed to allocate device structure.\n");
6223 /* Allocate memory for 16 byte aligned buffer */
6224 local_buffer_16_align
= local_buffer
;
6225 temp_buffer_16_align
= temp_buffer
;
6226 //printk(KERN_INFO "Allocate 16 byte aligned buffer: %p\n", local_buffer_16_align);
6231 PL_TIME_PROG_WDT_SET(1);
6232 PL_TIME_ERASE_WDT_SET(1);
6234 /* init mtd data structure */
6235 nand_chip
= &host
->nand_chip
;
6236 nand_chip
->priv
= host
; /* link the private data structures */
6239 mtd
->priv
= nand_chip
;
6240 mtd
->owner
= THIS_MODULE
;
6241 mtd
->name
= "MTK-Nand";
6242 mtd
->eraseregions
= host
->erase_region
;
6244 hw
->nand_ecc_mode
= NAND_ECC_HW
;
6246 /* Set address of NAND IO lines */
6247 nand_chip
->IO_ADDR_R
= (void __iomem
*)NFI_DATAR_REG32
;
6248 nand_chip
->IO_ADDR_W
= (void __iomem
*)NFI_DATAW_REG32
;
6249 nand_chip
->chip_delay
= 20; /* 20us command delay time */
6250 nand_chip
->ecc
.mode
= hw
->nand_ecc_mode
; /* enable ECC */
6252 nand_chip
->read_byte
= mtk_nand_read_byte
;
6253 nand_chip
->read_buf
= mtk_nand_read_buf
;
6254 nand_chip
->write_buf
= mtk_nand_write_buf
;
6255 #ifdef CONFIG_MTD_NAND_VERIFY_WRITE
6256 nand_chip
->verify_buf
= mtk_nand_verify_buf
;
6258 nand_chip
->select_chip
= mtk_nand_select_chip
;
6259 nand_chip
->dev_ready
= mtk_nand_dev_ready
;
6260 nand_chip
->cmdfunc
= mtk_nand_command_bp
;
6261 nand_chip
->ecc
.read_page
= mtk_nand_read_page_hwecc
;
6262 nand_chip
->ecc
.write_page
= mtk_nand_write_page_hwecc
;
6264 nand_chip
->ecc
.layout
= &nand_oob_64
;
6265 nand_chip
->ecc
.size
= hw
->nand_ecc_size
; //2048
6266 nand_chip
->ecc
.bytes
= hw
->nand_ecc_bytes
; //32
6268 nand_chip
->options
= NAND_SKIP_BBTSCAN
;
6270 // For BMT, we need to revise driver architecture
6271 nand_chip
->write_page
= mtk_nand_write_page
;
6272 nand_chip
->read_page
= mtk_nand_read_page
;
6273 nand_chip
->read_subpage
= mtk_nand_read_subpage
;
6274 nand_chip
->ecc
.write_oob
= mtk_nand_write_oob
;
6275 nand_chip
->ecc
.read_oob
= mtk_nand_read_oob
;
6276 nand_chip
->block_markbad
= mtk_nand_block_markbad
; // need to add nand_get_device()/nand_release_device().
6277 nand_chip
->erase
= mtk_nand_erase
;
6278 nand_chip
->block_bad
= mtk_nand_block_bad
;
6279 nand_chip
->init_size
= mtk_nand_init_size
;
6280 #if CFG_FPGA_PLATFORM
6281 MSG(INIT
, "[FPGA Dummy]Enable NFI and NFIECC Clock\n");
6283 //MSG(INIT, "[NAND]Enable NFI and NFIECC Clock\n");
6284 nand_enable_clock();
6286 mtk_nand_gpio_init();
6287 mtk_nand_init_hw(host
);
6288 /* Select the device */
6289 nand_chip
->select_chip(mtd
, NFI_DEFAULT_CS
);
6292 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
6295 nand_chip
->cmdfunc(mtd
, NAND_CMD_RESET
, -1, -1);
6297 /* Send the command for reading device ID */
6298 nand_chip
->cmdfunc(mtd
, NAND_CMD_READID
, 0x00, -1);
6300 for(i
=0;i
<NAND_MAX_ID
;i
++){
6301 id
[i
]=nand_chip
->read_byte(mtd
);
6306 if (!get_device_info(id
,&devinfo
))
6308 MSG(INIT
, "Not Support this Device! \r\n");
6311 if (mtk_nand_cs_check(mtd
, id
, NFI_TRICKY_CS
))
6313 MSG(INIT
, "Twins Nand\n");
6314 g_bTricky_CS
= TRUE
;
6319 if (devinfo
.pagesize
== 16384)
6321 nand_chip
->ecc
.layout
= &nand_oob_128
;
6322 hw
->nand_ecc_size
= 16384;
6323 } else if (devinfo
.pagesize
== 8192)
6325 nand_chip
->ecc
.layout
= &nand_oob_128
;
6326 hw
->nand_ecc_size
= 8192;
6327 } else if (devinfo
.pagesize
== 4096)
6329 nand_chip
->ecc
.layout
= &nand_oob_128
;
6330 hw
->nand_ecc_size
= 4096;
6331 } else if (devinfo
.pagesize
== 2048)
6333 nand_chip
->ecc
.layout
= &nand_oob_64
;
6334 hw
->nand_ecc_size
= 2048;
6335 } else if (devinfo
.pagesize
== 512)
6337 nand_chip
->ecc
.layout
= &nand_oob_16
;
6338 hw
->nand_ecc_size
= 512;
6340 if(devinfo
.sectorsize
== 1024)
6343 hw
->nand_sec_shift
= 10;
6344 hw
->nand_sec_size
= 1024;
6345 NFI_CLN_REG32(NFI_PAGEFMT_REG16
, PAGEFMT_SECTOR_SEL
);
6347 if(devinfo
.pagesize
<= 4096)
6349 nand_chip
->ecc
.layout
->eccbytes
= devinfo
.sparesize
-OOB_AVAI_PER_SECTOR
*(devinfo
.pagesize
/sector_size
);
6350 hw
->nand_ecc_bytes
= nand_chip
->ecc
.layout
->eccbytes
;
6351 // Modify to fit device character
6352 nand_chip
->ecc
.size
= hw
->nand_ecc_size
;
6353 nand_chip
->ecc
.bytes
= hw
->nand_ecc_bytes
;
6357 nand_chip
->ecc
.layout
->eccbytes
= 64;//devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/sector_size);
6358 hw
->nand_ecc_bytes
= nand_chip
->ecc
.layout
->eccbytes
;
6359 // Modify to fit device character
6360 nand_chip
->ecc
.size
= hw
->nand_ecc_size
;
6361 nand_chip
->ecc
.bytes
= hw
->nand_ecc_bytes
;
6363 nand_chip
->subpagesize
= devinfo
.sectorsize
;
6364 nand_chip
->subpage_size
= devinfo
.sectorsize
;
6366 for(i
=0;i
<nand_chip
->ecc
.layout
->eccbytes
;i
++){
6367 nand_chip
->ecc
.layout
->eccpos
[i
]=OOB_AVAI_PER_SECTOR
*(devinfo
.pagesize
/sector_size
)+i
;
6369 //MSG(INIT, "[NAND] pagesz:%d , oobsz: %d,eccbytes: %d\n",
6370 // devinfo.pagesize, sizeof(g_kCMD.au1OOB),nand_chip->ecc.layout->eccbytes);
6373 //MSG(INIT, "Support this Device in MTK table! %x \r\n", id);
6375 if(devinfo
.vendor
!= VEND_NONE
)
6377 if((*EFUSE_RANDOM_CFG
)&EFUSE_RANDOM_ENABLE
)
6379 MSG(INIT
, "[NAND]EFUSE RANDOM CFG is ON\n");
6380 use_randomizer
= TRUE
;
6381 pre_randomizer
= TRUE
;
6385 MSG(INIT
, "[NAND]EFUSE RANDOM CFG is OFF\n");
6386 use_randomizer
= FALSE
;
6387 pre_randomizer
= FALSE
;
6392 if((devinfo
.feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX_16NM
) || (devinfo
.feature_set
.FeatureSet
.rtype
== RTYPE_HYNIX
))
6393 HYNIX_RR_TABLE_READ(&devinfo
);
6395 hw
->nfi_bus_width
= devinfo
.iowidth
;
6397 if(devinfo
.vendor
== VEND_MICRON
)
6399 if(devinfo
.feature_set
.FeatureSet
.Async_timing
.feature
!= 0xFF)
6401 struct gFeatureSet
*feature_set
= &(devinfo
.feature_set
.FeatureSet
);
6403 mtk_nand_SetFeature(mtd
, (u16
) feature_set
->sfeatureCmd
, \
6404 feature_set
->Async_timing
.address
, (u8
*)(&feature_set
->Async_timing
.feature
),\
6405 sizeof(feature_set
->Async_timing
.feature
));
6406 //mtk_nand_GetFeature(mtd, feature_set->gfeatureCmd, \
6407 //feature_set->Async_timing.address, (u8 *)(&val),4);
6408 //printk("[ASYNC Interface]0x%X\n", val);
6412 DRV_WriteReg16(NFI_CSEL_REG16
, NFI_TRICKY_CS
);
6413 mtk_nand_SetFeature(mtd
, (u16
) feature_set
->sfeatureCmd
, \
6414 feature_set
->Async_timing
.address
, (u8
*)(&feature_set
->Async_timing
.feature
),\
6415 sizeof(feature_set
->Async_timing
.feature
));
6416 DRV_WriteReg16(NFI_CSEL_REG16
, NFI_DEFAULT_CS
);
6422 //MSG(INIT, "AHB Clock(0x%x) ",DRV_Reg32(PERICFG_BASE+0x5C));
6423 //DRV_WriteReg32(PERICFG_BASE+0x5C, 0x1);
6424 //MSG(INIT, "AHB Clock(0x%x)",DRV_Reg32(PERICFG_BASE+0x5C));
6425 DRV_WriteReg32(NFI_ACCCON_REG32
, devinfo
.timmingsetting
);
6426 //MSG(INIT, "Kernel Nand Timing:0x%x!\n", DRV_Reg32(NFI_ACCCON_REG32));
6428 /* 16-bit bus width */
6429 if (hw
->nfi_bus_width
== 16)
6431 MSG(INIT
, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME
);
6432 nand_chip
->options
|= NAND_BUSWIDTH_16
;
6434 mt_irq_set_sens(MT_NFI_IRQ_ID
, MT65xx_LEVEL_SENSITIVE
);
6435 mt_irq_set_polarity(MT_NFI_IRQ_ID
, MT65xx_POLARITY_LOW
);
6436 err
= request_irq(MT_NFI_IRQ_ID
, mtk_nand_irq_handler
, IRQF_DISABLED
, "mtk-nand", NULL
);
6440 MSG(INIT
, "%s : Request IRQ fail: err = %d\n", MODULE_NAME
, err
);
6445 enable_irq(MT_NFI_IRQ_ID
);
6447 disable_irq(MT_NFI_IRQ_ID
);
6450 if (devinfo
.advancedmode
& CACHE_READ
)
6452 nand_chip
->ecc
.read_multi_page_cache
= NULL
;
6453 // nand_chip->ecc.read_multi_page_cache = mtk_nand_read_multi_page_cache;
6454 // MSG(INIT, "Device %x support cache read \r\n",id);
6456 nand_chip
->ecc
.read_multi_page_cache
= NULL
;
6458 mtd
->oobsize
= devinfo
.sparesize
;
6459 /* Scan to find existance of the device */
6460 if (nand_scan(mtd
, hw
->nfi_cs_num
))
6462 MSG(INIT
, "%s : nand_scan fail.\n", MODULE_NAME
);
6467 g_page_size
= mtd
->writesize
;
6468 g_block_size
= devinfo
.blocksize
<< 10;
6469 PAGES_PER_BLOCK
= (u32
)(g_block_size
/ g_page_size
);
6470 //MSG(INIT, "g_page_size(%d) g_block_size(%d)\n",g_page_size, g_block_size);
6472 g_nanddie_pages
= (u32
)(nand_chip
->chipsize
>> nand_chip
->page_shift
);
6473 //if(devinfo.ttarget == TTYPE_2DIE)
6475 // g_nanddie_pages = g_nanddie_pages / 2;
6479 nand_chip
->chipsize
<<= 1;
6480 //MSG(INIT, "[Bean]%dMB\n", (u32)(nand_chip->chipsize/1024/1024));
6482 //MSG(INIT, "[Bean]g_nanddie_pages %x\n", g_nanddie_pages);
6485 #ifdef PART_SIZE_BMTPOOL
6486 if (PART_SIZE_BMTPOOL
)
6488 bmt_sz
= (PART_SIZE_BMTPOOL
) >> nand_chip
->phys_erase_shift
;
6492 bmt_sz
= (int)(((u32
)(nand_chip
->chipsize
>> nand_chip
->phys_erase_shift
))/100*6);
6494 //if (manu_id == 0x45)
6496 // bmt_sz = bmt_sz * 2;
6499 platform_set_drvdata(pdev
, host
);
6501 if (hw
->nfi_bus_width
== 16)
6503 NFI_SET_REG16(NFI_PAGEFMT_REG16
, PAGEFMT_DBYTE_EN
);
6506 nand_chip
->select_chip(mtd
, 0);
6507 #if defined(MTK_COMBO_NAND_SUPPORT)
6509 nand_chip
->chipsize
-= (bmt_sz
* g_block_size
);
6511 nand_chip
->chipsize
-= (PART_SIZE_BMTPOOL
);
6516 // nand_chip->chipsize -= (PART_SIZE_BMTPOOL); // if 2CS nand need cut down again
6520 nand_chip
->chipsize
-= (BMT_POOL_SIZE
) << nand_chip
->phys_erase_shift
;
6522 mtd
->size
= nand_chip
->chipsize
;
6523 #if NAND_READ_PERFORMANCE
6524 struct timeval stimer
,etimer
;
6525 do_gettimeofday(&stimer
);
6526 for (i
= 256; i
< 512; i
++)
6528 mtk_nand_read_page(mtd
, nand_chip
, local_buffer
, i
);
6529 MSG(INIT
,"[%d]0x%x %x %x %x\n",i
, *(int *)local_buffer
, *((int *)local_buffer
+1), *((int *)local_buffer
+2), *((int *)local_buffer
+3));
6530 MSG(INIT
,"[%d]0x%x %x %x %x\n",i
, *(int *)local_buffer
+4, *((int *)local_buffer
+5), *((int *)local_buffer
+6), *((int *)local_buffer
+7));
6531 MSG(INIT
,"[%d]0x%x %x %x %x\n",i
, *(int *)local_buffer
+8, *((int *)local_buffer
+9), *((int *)local_buffer
+10), *((int *)local_buffer
+11));
6532 MSG(INIT
,"[%d]0x%x %x %x %x\n",i
, *(int *)local_buffer
+12, *((int *)local_buffer
+13), *((int *)local_buffer
+14), *((int *)local_buffer
+15));
6534 do_gettimeofday(&etimer
);
6535 printk("[NAND Read Perf.Test] %ld MB/s\n", (g_page_size
*256)/Cal_timediff(&etimer
,&stimer
));
6538 if(devinfo
.vendor
!= VEND_NONE
)
6540 err
= mtk_nand_interface_config(mtd
);
6544 DRV_WriteReg16(NFI_CSEL_REG16
, NFI_TRICKY_CS
);
6545 err
= mtk_nand_interface_config(mtd
);
6546 DRV_WriteReg16(NFI_CSEL_REG16
, NFI_DEFAULT_CS
);
6549 //MSG(INIT, "[nand_interface_config] %d\n",err);
6551 //for (regp = 0xF0206000; regp <= 0xF020631C; regp+=4)
6552 // printk("[%08X]0x%08X\n", regp, DRV_Reg32(regp));
6553 #if NAND_READ_PERFORMANCE
6554 do_gettimeofday(&stimer
);
6555 for (i
= 256; i
< 512; i
++)
6557 mtk_nand_read_page(mtd
, nand_chip
, local_buffer
, i
);
6558 MSG(INIT
,"[%d]0x%x %x %x %x\n",i
, *(int *)local_buffer
, *((int *)local_buffer
+1), *((int *)local_buffer
+2), *((int *)local_buffer
+3));
6559 MSG(INIT
,"[%d]0x%x %x %x %x\n",i
, *(int *)local_buffer
+4, *((int *)local_buffer
+5), *((int *)local_buffer
+6), *((int *)local_buffer
+7));
6560 MSG(INIT
,"[%d]0x%x %x %x %x\n",i
, *(int *)local_buffer
+8, *((int *)local_buffer
+9), *((int *)local_buffer
+10), *((int *)local_buffer
+11));
6561 MSG(INIT
,"[%d]0x%x %x %x %x\n",i
, *(int *)local_buffer
+12, *((int *)local_buffer
+13), *((int *)local_buffer
+14), *((int *)local_buffer
+15));
6563 do_gettimeofday(&etimer
);
6564 printk("[NAND Read Perf.Test] %d MB/s\n", (g_page_size
*256)/Cal_timediff(&etimer
,&stimer
));
6571 #if defined(MTK_COMBO_NAND_SUPPORT)
6573 if (!(g_bmt
= init_bmt(nand_chip
, bmt_sz
)))
6575 if (!(g_bmt
= init_bmt(nand_chip
, ((PART_SIZE_BMTPOOL
) >> nand_chip
->phys_erase_shift
))))
6578 if (!(g_bmt
= init_bmt(nand_chip
, BMT_POOL_SIZE
)))
6581 MSG(INIT
, "Error: init bmt failed\n");
6586 nand_chip
->chipsize
-= (PMT_POOL_SIZE
) << nand_chip
->phys_erase_shift
;
6587 mtd
->size
= nand_chip
->chipsize
;
6588 #if KERNEL_NAND_UNIT_TEST
6589 err
= mtk_nand_unit_test(nand_chip
, mtd
);
6592 printk("Thanks to GOD, UNIT Test OK!\n");
6596 part_init_pmt(mtd
, (u8
*) & g_exist_Partition
[0]);
6597 err
= mtd_device_register(mtd
, g_exist_Partition
, part_num
);
6599 err
= mtd_device_register(mtd
, g_pasStatic_Partition
, part_num
);
6602 #ifdef _MTK_NAND_DUMMY_DRIVER_
6603 dummy_driver_debug
= 0;
6606 /* Successfully!! */
6609 //MSG(INIT, "[mtk_nand] probe successfully!\n");
6610 nand_disable_clock();
6616 MSG(INIT
, "[NFI] mtk_nand_probe fail, err = %d!\n", err
);
6618 platform_set_drvdata(pdev
, NULL
);
6620 nand_disable_clock();
6623 /******************************************************************************
6627 * Suspend the nand device!
6630 * struct platform_device *pdev : device structure
6638 ******************************************************************************/
6639 static int mtk_nand_suspend(struct platform_device
*pdev
, pm_message_t state
)
6641 struct mtk_nand_host
*host
= platform_get_drvdata(pdev
);
6642 // struct mtd_info *mtd = &host->mtd;
6646 if(host
->saved_para
.suspend_flag
==0)
6648 nand_enable_clock();
6649 // Save NFI register
6650 host
->saved_para
.sNFI_CNFG_REG16
= DRV_Reg16(NFI_CNFG_REG16
);
6651 host
->saved_para
.sNFI_PAGEFMT_REG16
= DRV_Reg16(NFI_PAGEFMT_REG16
);
6652 host
->saved_para
.sNFI_CON_REG16
= DRV_Reg32(NFI_CON_REG16
);
6653 host
->saved_para
.sNFI_ACCCON_REG32
= DRV_Reg32(NFI_ACCCON_REG32
);
6654 host
->saved_para
.sNFI_INTR_EN_REG16
= DRV_Reg16(NFI_INTR_EN_REG16
);
6655 host
->saved_para
.sNFI_IOCON_REG16
= DRV_Reg16(NFI_IOCON_REG16
);
6656 host
->saved_para
.sNFI_CSEL_REG16
= DRV_Reg16(NFI_CSEL_REG16
);
6657 host
->saved_para
.sNFI_DEBUG_CON1_REG16
= DRV_Reg16(NFI_DEBUG_CON1_REG16
);
6659 // save ECC register
6660 host
->saved_para
.sECC_ENCCNFG_REG32
= DRV_Reg32(ECC_ENCCNFG_REG32
);
6661 // host->saved_para.sECC_FDMADDR_REG32 = DRV_Reg32(ECC_FDMADDR_REG32);
6662 host
->saved_para
.sECC_DECCNFG_REG32
= DRV_Reg32(ECC_DECCNFG_REG32
);
6664 if (g_bSyncOrToggle
)
6666 host
->saved_para
.sNFI_DLYCTRL_REG32
= DRV_Reg32(NFI_DLYCTRL_REG32
);
6667 host
->saved_para
.sPERI_NFI_MAC_CTRL
= DRV_Reg32(PERI_NFI_MAC_CTRL
);
6668 host
->saved_para
.sNFI_NAND_TYPE_CNFG_REG32
= DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32
);
6669 host
->saved_para
.sNFI_ACCCON1_REG32
= DRV_Reg32(NFI_ACCCON1_REG3
);
6671 #ifdef MTK_PMIC_MT6397
6672 hwPowerDown(MT65XX_POWER_LDO_VMCH
, "NFI");
6674 hwPowerDown(MT6323_POWER_LDO_VMCH
, "NFI");
6676 nand_disable_clock();
6677 host
->saved_para
.suspend_flag
=1;
6681 MSG(INIT
, "[NFI] Suspend twice !\n");
6685 MSG(INIT
, "[NFI] Suspend !\n");
6689 /******************************************************************************
6693 * Resume the nand device!
6696 * struct platform_device *pdev : device structure
6704 ******************************************************************************/
6705 static int mtk_nand_resume(struct platform_device
*pdev
)
6707 struct mtk_nand_host
*host
= platform_get_drvdata(pdev
);
6708 //struct mtd_info *mtd = &host->mtd; //for test
6709 // struct nand_chip *chip = mtd->priv;
6710 //struct gFeatureSet *feature_set = &(devinfo.feature_set.FeatureSet); //for test
6711 //int val = -1; // for test
6712 //[BUGFIX]-Add-BEGIN by SCDTABLET.(lilin.liu@jrdcom.com), PR981151,981152 04/24/2015
6715 //[BUGFIX]-Add-END by SCDTABLET.(lilin.liu@jrdcom.com)
6719 if(host
->saved_para
.suspend_flag
==1)
6721 nand_enable_clock();
6722 // restore NFI register
6723 #ifdef MTK_PMIC_MT6397
6724 hwPowerOn(MT65XX_POWER_LDO_VMCH
, VOL_3300
, "NFI");
6726 hwPowerOn(MT6323_POWER_LDO_VMCH
, VOL_3300
, "NFI");
6729 //[BUGFIX]-Add-BEGIN by SCDTABLET.(lilin.liu@jrdcom.com), PR981151,981152 04/24/2015
6730 MSG(INIT
, "[NFI] Resume Add extera 1ms Delay and wait for device reset ready !\n");
6733 ret
= mtk_nand_device_reset();
6737 udelay(100); //total 200ms polling nand reset status
6741 MSG(INIT
, "[NFI] Resume Error, device reset failed here!\n");
6743 //[BUGFIX]-Add-END by SCDTABLET.(lilin.liu@jrdcom.com)
6744 DRV_WriteReg16(NFI_CNFG_REG16
,host
->saved_para
.sNFI_CNFG_REG16
);
6745 DRV_WriteReg16(NFI_PAGEFMT_REG16
,host
->saved_para
.sNFI_PAGEFMT_REG16
);
6746 DRV_WriteReg32(NFI_CON_REG16
,host
->saved_para
.sNFI_CON_REG16
);
6747 DRV_WriteReg32(NFI_ACCCON_REG32
,host
->saved_para
.sNFI_ACCCON_REG32
);
6748 DRV_WriteReg16(NFI_IOCON_REG16
,host
->saved_para
.sNFI_IOCON_REG16
);
6749 DRV_WriteReg16(NFI_CSEL_REG16
,host
->saved_para
.sNFI_CSEL_REG16
);
6750 DRV_WriteReg16(NFI_DEBUG_CON1_REG16
,host
->saved_para
.sNFI_DEBUG_CON1_REG16
);
6752 // restore ECC register
6753 DRV_WriteReg32(ECC_ENCCNFG_REG32
,host
->saved_para
.sECC_ENCCNFG_REG32
);
6754 // DRV_WriteReg32(ECC_FDMADDR_REG32 ,host->saved_para.sECC_FDMADDR_REG32);
6755 DRV_WriteReg32(ECC_DECCNFG_REG32
,host
->saved_para
.sECC_DECCNFG_REG32
);
6757 // Reset NFI and ECC state machine
6758 /* Reset the state machine and data FIFO, because flushing FIFO */
6759 (void)mtk_nand_reset();
6761 DRV_WriteReg16(ECC_DECCON_REG16
, DEC_DE
);
6762 while (!DRV_Reg16(ECC_DECIDLE_REG16
));
6764 DRV_WriteReg16(ECC_ENCCON_REG16
, ENC_DE
);
6765 while (!DRV_Reg32(ECC_ENCIDLE_REG32
));
6768 /* Initilize interrupt. Clear interrupt, read clear. */
6769 DRV_Reg16(NFI_INTR_REG16
);
6771 DRV_WriteReg16(NFI_INTR_EN_REG16
,host
->saved_para
.sNFI_INTR_EN_REG16
);
6773 //mtk_nand_interface_config(&host->mtd);
6774 if (g_bSyncOrToggle
)
6776 NFI_CLN_REG32(NFI_DEBUG_CON1_REG16
,HWDCM_SWCON_ON
);
6777 NFI_CLN_REG32(NFI_DEBUG_CON1_REG16
,NFI_BYPASS
);
6778 NFI_CLN_REG32(ECC_BYPASS_REG32
,ECC_BYPASS
);
6779 DRV_WriteReg32(PERICFG_BASE
+0x5C, 0x0);
6780 NFI_SET_REG32(PERI_NFI_CLK_SOURCE_SEL
, NFI_PAD_1X_CLOCK
);
6781 clkmux_sel(MT_MUX_NFI2X
,g_iNFI2X_CLKSRC
,"NFI");
6782 DRV_WriteReg32(NFI_DLYCTRL_REG32
, host
->saved_para
.sNFI_DLYCTRL_REG32
);
6783 DRV_WriteReg32(PERI_NFI_MAC_CTRL
, host
->saved_para
.sPERI_NFI_MAC_CTRL
);
6784 while(0 == (DRV_Reg32(NFI_STA_REG32
) && STA_FLASH_MACRO_IDLE
));
6785 DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32
, host
->saved_para
.sNFI_NAND_TYPE_CNFG_REG32
);
6786 DRV_WriteReg32(NFI_ACCCON1_REG3
,host
->saved_para
.sNFI_ACCCON1_REG32
);
6788 //mtk_nand_GetFeature(mtd, feature_set->gfeatureCmd, \
6789 //feature_set->Interface.address, (u8 *)&val,4);
6790 //MSG(POWERCTL, "[NFI] Resume feature %d!\n", val);
6791 nand_disable_clock();
6792 host
->saved_para
.suspend_flag
= 0;
6796 MSG(INIT
, "[NFI] Resume twice !\n");
6799 MSG(INIT
, "[NFI] Resume !\n");
6803 /******************************************************************************
6807 * unregister the nand device file operations !
6810 * struct platform_device *pdev : device structure
6818 ******************************************************************************/
6820 static int mtk_nand_remove(struct platform_device
*pdev
)
6822 struct mtk_nand_host
*host
= platform_get_drvdata(pdev
);
6823 struct mtd_info
*mtd
= &host
->mtd
;
6829 nand_disable_clock();
6834 /******************************************************************************
6835 * NAND OTP operations
6836 * ***************************************************************************/
6837 #if (defined(NAND_OTP_SUPPORT) && SAMSUNG_OTP_SUPPORT)
6838 unsigned int samsung_OTPQueryLength(unsigned int *QLength
)
6840 *QLength
= SAMSUNG_OTP_PAGE_NUM
* g_page_size
;
6844 unsigned int samsung_OTPRead(unsigned int PageAddr
, void *BufferPtr
, void *SparePtr
)
6846 struct mtd_info
*mtd
= &host
->mtd
;
6847 unsigned int rowaddr
, coladdr
;
6848 unsigned int u4Size
= g_page_size
;
6849 unsigned int timeout
= 0xFFFF;
6851 unsigned int sec_num
= mtd
->writesize
>> host
->hw
->nand_sec_shift
;
6853 if (PageAddr
>= SAMSUNG_OTP_PAGE_NUM
)
6855 return OTP_ERROR_OVERSCOPE
;
6858 /* Col -> Row; LSB first */
6859 coladdr
= 0x00000000;
6860 rowaddr
= Samsung_OTP_Page
[PageAddr
];
6862 MSG(OTP
, "[%s]:(COLADDR) [0x%08x]/(ROWADDR)[0x%08x]\n", __func__
, coladdr
, rowaddr
);
6864 /* Power on NFI HW component. */
6865 nand_get_device(mtd
, FL_READING
);
6867 (void)mtk_nand_set_command(0x30);
6869 (void)mtk_nand_set_command(0x65);
6871 MSG(OTP
, "[%s]: Start to read data from OTP area\n", __func__
);
6873 if (!mtk_nand_reset())
6875 bRet
= OTP_ERROR_RESET
;
6879 mtk_nand_set_mode(CNFG_OP_READ
);
6880 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_READ_EN
);
6881 DRV_WriteReg32(NFI_CON_REG16
, sec_num
<< CON_NFI_SEC_SHIFT
);
6883 DRV_WriteReg32(NFI_STRADDR_REG32
, __virt_to_phys(BufferPtr
));
6884 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
6888 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
6891 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
6893 mtk_nand_set_autoformat(true);
6898 if (!mtk_nand_set_command(NAND_CMD_READ0
))
6900 bRet
= OTP_ERROR_BUSY
;
6904 if (!mtk_nand_set_address(coladdr
, rowaddr
, 2, 3))
6906 bRet
= OTP_ERROR_BUSY
;
6910 if (!mtk_nand_set_command(NAND_CMD_READSTART
))
6912 bRet
= OTP_ERROR_BUSY
;
6916 if (!mtk_nand_status_ready(STA_NAND_BUSY
))
6918 bRet
= OTP_ERROR_BUSY
;
6922 if (!mtk_nand_read_page_data(mtd
, BufferPtr
, u4Size
))
6924 bRet
= OTP_ERROR_BUSY
;
6928 if (!mtk_nand_status_ready(STA_NAND_BUSY
))
6930 bRet
= OTP_ERROR_BUSY
;
6934 mtk_nand_read_fdm_data(SparePtr
, sec_num
);
6936 mtk_nand_stop_read();
6938 MSG(OTP
, "[%s]: End to read data from OTP area\n", __func__
);
6945 (void)mtk_nand_set_command(0xFF);
6946 nand_release_device(mtd
);
6950 unsigned int samsung_OTPWrite(unsigned int PageAddr
, void *BufferPtr
, void *SparePtr
)
6952 struct mtd_info
*mtd
= &host
->mtd
;
6953 unsigned int rowaddr
, coladdr
;
6954 unsigned int u4Size
= g_page_size
;
6955 unsigned int timeout
= 0xFFFF;
6957 unsigned int sec_num
= mtd
->writesize
>> 9;
6959 if (PageAddr
>= SAMSUNG_OTP_PAGE_NUM
)
6961 return OTP_ERROR_OVERSCOPE
;
6964 /* Col -> Row; LSB first */
6965 coladdr
= 0x00000000;
6966 rowaddr
= Samsung_OTP_Page
[PageAddr
];
6968 MSG(OTP
, "[%s]:(COLADDR) [0x%08x]/(ROWADDR)[0x%08x]\n", __func__
, coladdr
, rowaddr
);
6969 nand_get_device(mtd
, FL_READING
);
6971 (void)mtk_nand_set_command(0x30);
6973 (void)mtk_nand_set_command(0x65);
6975 MSG(OTP
, "[%s]: Start to write data to OTP area\n", __func__
);
6977 if (!mtk_nand_reset())
6979 bRet
= OTP_ERROR_RESET
;
6983 mtk_nand_set_mode(CNFG_OP_PRGM
);
6985 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_READ_EN
);
6987 DRV_WriteReg32(NFI_CON_REG16
, sec_num
<< CON_NFI_SEC_SHIFT
);
6989 DRV_WriteReg32(NFI_STRADDR_REG32
, __virt_to_phys(BufferPtr
));
6990 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_AHB
);
6994 NFI_SET_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
6997 NFI_CLN_REG16(NFI_CNFG_REG16
, CNFG_HW_ECC_EN
);
6999 mtk_nand_set_autoformat(true);
7003 if (!mtk_nand_set_command(NAND_CMD_SEQIN
))
7005 bRet
= OTP_ERROR_BUSY
;
7009 if (!mtk_nand_set_address(coladdr
, rowaddr
, 2, 3))
7011 bRet
= OTP_ERROR_BUSY
;
7015 if (!mtk_nand_status_ready(STA_NAND_BUSY
))
7017 bRet
= OTP_ERROR_BUSY
;
7021 mtk_nand_write_fdm_data((struct nand_chip
*)mtd
->priv
, BufferPtr
, sec_num
);
7022 (void)mtk_nand_write_page_data(mtd
, BufferPtr
, u4Size
);
7023 if (!mtk_nand_check_RW_count(u4Size
))
7025 MSG(OTP
, "[%s]: Check RW count timeout !\n", __func__
);
7026 bRet
= OTP_ERROR_TIMEOUT
;
7030 mtk_nand_stop_write();
7031 (void)mtk_nand_set_command(NAND_CMD_PAGEPROG
);
7032 while (DRV_Reg32(NFI_STA_REG32
) & STA_NAND_BUSY
) ;
7036 MSG(OTP
, "[%s]: End to write data to OTP area\n", __func__
);
7040 (void)mtk_nand_set_command( NAND_CMD_RESET
);
7041 nand_release_device(mtd
);
7045 static int mt_otp_open(struct inode
*inode
, struct file
*filp
)
7047 MSG(OTP
, "[%s]:(MAJOR)%d:(MINOR)%d\n", __func__
, MAJOR(inode
->i_rdev
), MINOR(inode
->i_rdev
));
7048 filp
->private_data
= (int *)OTP_MAGIC_NUM
;
7052 static int mt_otp_release(struct inode
*inode
, struct file
*filp
)
7054 MSG(OTP
, "[%s]:(MAJOR)%d:(MINOR)%d\n", __func__
, MAJOR(inode
->i_rdev
), MINOR(inode
->i_rdev
));
7058 static int mt_otp_access(unsigned int access_type
, unsigned int offset
, void *buff_ptr
, unsigned int length
, unsigned int *status
)
7060 unsigned int i
= 0, ret
= 0;
7061 char *BufAddr
= (char *)buff_ptr
;
7062 unsigned int PageAddr
, AccessLength
= 0;
7065 static char *p_D_Buff
= NULL
;
7068 if (!(p_D_Buff
= kmalloc(g_page_size
, GFP_KERNEL
)))
7071 *status
= OTP_ERROR_NOMEM
;
7075 MSG(OTP
, "[%s]: %s (0x%x) length:(%d bytes) !\n", __func__
, access_type
? "WRITE" : "READ", offset
, length
);
7079 PageAddr
= offset
/ g_page_size
;
7080 if (FS_OTP_READ
== access_type
)
7082 memset(p_D_Buff
, 0xff, g_page_size
);
7083 memset(S_Buff
, 0xff, (sizeof(char) * 64));
7085 MSG(OTP
, "[%s]: Read Access of page (%d)\n", __func__
, PageAddr
);
7087 Status
= g_mtk_otp_fuc
.OTPRead(PageAddr
, p_D_Buff
, &S_Buff
);
7090 if (OTP_SUCCESS
!= Status
)
7092 MSG(OTP
, "[%s]: Read status (%d)\n", __func__
, Status
);
7096 AccessLength
= g_page_size
- (offset
% g_page_size
);
7098 if (length
>= AccessLength
)
7100 memcpy(BufAddr
, (p_D_Buff
+ (offset
% g_page_size
)), AccessLength
);
7104 memcpy(BufAddr
, (p_D_Buff
+ (offset
% g_page_size
)), length
);
7106 } else if (FS_OTP_WRITE
== access_type
)
7108 AccessLength
= g_page_size
- (offset
% g_page_size
);
7109 memset(p_D_Buff
, 0xff, g_page_size
);
7110 memset(S_Buff
, 0xff, (sizeof(char) * 64));
7112 if (length
>= AccessLength
)
7114 memcpy((p_D_Buff
+ (offset
% g_page_size
)), BufAddr
, AccessLength
);
7118 memcpy((p_D_Buff
+ (offset
% g_page_size
)), BufAddr
, length
);
7121 Status
= g_mtk_otp_fuc
.OTPWrite(PageAddr
, p_D_Buff
, &S_Buff
);
7124 if (OTP_SUCCESS
!= Status
)
7126 MSG(OTP
, "[%s]: Write status (%d)\n", __func__
, Status
);
7131 MSG(OTP
, "[%s]: Error, not either read nor write operations !\n", __func__
);
7135 offset
+= AccessLength
;
7136 BufAddr
+= AccessLength
;
7137 if (length
<= AccessLength
)
7143 length
-= AccessLength
;
7144 MSG(OTP
, "[%s]: Remaining %s (%d) !\n", __func__
, access_type
? "WRITE" : "READ", length
);
7153 static long mt_otp_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
7156 static char *pbuf
= NULL
;
7158 void __user
*uarg
= (void __user
*)arg
;
7159 struct otp_ctl otpctl
;
7162 spin_lock(&g_OTPLock
);
7164 if (copy_from_user(&otpctl
, uarg
, sizeof(struct otp_ctl
)))
7170 if (false == g_bInitDone
)
7172 MSG(OTP
, "ERROR: NAND Flash Not initialized !!\n");
7177 if (!(pbuf
= kmalloc(sizeof(char) * otpctl
.Length
, GFP_KERNEL
)))
7185 case OTP_GET_LENGTH
:
7186 MSG(OTP
, "OTP IOCTL: OTP_GET_LENGTH\n");
7187 g_mtk_otp_fuc
.OTPQueryLength(&otpctl
.QLength
);
7188 otpctl
.status
= OTP_SUCCESS
;
7189 MSG(OTP
, "OTP IOCTL: The Length is %d\n", otpctl
.QLength
);
7192 MSG(OTP
, "OTP IOCTL: OTP_READ Offset(0x%x), Length(0x%x) \n", otpctl
.Offset
, otpctl
.Length
);
7193 memset(pbuf
, 0xff, sizeof(char) * otpctl
.Length
);
7195 mt_otp_access(FS_OTP_READ
, otpctl
.Offset
, pbuf
, otpctl
.Length
, &otpctl
.status
);
7197 if (copy_to_user(otpctl
.BufferPtr
, pbuf
, (sizeof(char) * otpctl
.Length
)))
7199 MSG(OTP
, "OTP IOCTL: Copy to user buffer Error !\n");
7204 MSG(OTP
, "OTP IOCTL: OTP_WRITE Offset(0x%x), Length(0x%x) \n", otpctl
.Offset
, otpctl
.Length
);
7205 if (copy_from_user(pbuf
, otpctl
.BufferPtr
, (sizeof(char) * otpctl
.Length
)))
7207 MSG(OTP
, "OTP IOCTL: Copy from user buffer Error !\n");
7210 mt_otp_access(FS_OTP_WRITE
, otpctl
.Offset
, pbuf
, otpctl
.Length
, &otpctl
.status
);
7216 ret
= copy_to_user(uarg
, &otpctl
, sizeof(struct otp_ctl
));
7221 spin_unlock(&g_OTPLock
);
7225 static struct file_operations nand_otp_fops
= {
7226 .owner
= THIS_MODULE
,
7227 .unlocked_ioctl
= mt_otp_ioctl
,
7228 .open
= mt_otp_open
,
7229 .release
= mt_otp_release
,
7232 static struct miscdevice nand_otp_dev
= {
7233 .minor
= MISC_DYNAMIC_MINOR
,
7235 .fops
= &nand_otp_fops
,
7239 /******************************************************************************
7240 Device driver structure
7241 ******************************************************************************/
7242 static struct platform_driver mtk_nand_driver
= {
7243 .probe
= mtk_nand_probe
,
7244 .remove
= mtk_nand_remove
,
7245 .suspend
= mtk_nand_suspend
,
7246 .resume
= mtk_nand_resume
,
7249 .owner
= THIS_MODULE
,
7253 /******************************************************************************
7257 * Init the device driver !
7268 ******************************************************************************/
/* Print to the seq_file when one is supplied, otherwise to the kernel log.
 * NOTE(review): macro body lines were lost in this copy of the source; this
 * is the standard kernel idiom for SEQ_printf — confirm against history. */
#define SEQ_printf(m, x...)		\
	do {				\
		if (m)			\
			seq_printf(m, x);	\
		else			\
			printk(x);	\
	} while (0)
7277 int mtk_nand_proc_show(struct seq_file
*m
, void *v
)
7280 SEQ_printf(m
, "ID:");
7281 for(i
=0;i
<devinfo
.id_length
;i
++){
7282 SEQ_printf(m
, " 0x%x", devinfo
.id
[i
]);
7284 SEQ_printf(m
, "\n");
7285 SEQ_printf(m
, "total size: %dMiB; part number: %s\n", devinfo
.totalsize
,devinfo
.devciename
);
7286 SEQ_printf(m
, "Current working in %s mode\n", g_i4Interrupt
? "interrupt" : "polling");
7287 SEQ_printf(m
, "NFI_ACCON(0x%x)=0x%x\n",(NFI_BASE
+0x000C),DRV_Reg32(NFI_ACCCON_REG32
));
7288 SEQ_printf(m
, "NFI_NAND_TYPE_CNFG_REG32= 0x%x\n",DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32
));
7289 #if CFG_FPGA_PLATFORM
7290 SEQ_printf(m
, "[FPGA Dummy]DRV_CFG_NFIA(0x0)=0x0\n");
7291 SEQ_printf(m
, "[FPGA Dummy]DRV_CFG_NFIB(0x0)=0x0\n");
7293 SEQ_printf(m
, "DRV_CFG_NFIA(IO PAD:0x%x)=0x%x\n",(GPIO_BASE
+0xC20),*((volatile u32
*)(GPIO_BASE
+0xC20)));
7294 SEQ_printf(m
, "DRV_CFG_NFIB(CTRL PAD:0x%x)=0x%x\n",(GPIO_BASE
+0xB50),*((volatile u32
*)(GPIO_BASE
+0xB50)));
7296 #if CFG_PERFLOG_DEBUG
7297 SEQ_printf(m
, "Read Page Count:%d, Read Page totalTime:%lu, Avg. RPage:%lu\r\n",
7298 g_NandPerfLog
.ReadPageCount
,g_NandPerfLog
.ReadPageTotalTime
,
7299 g_NandPerfLog
.ReadPageCount
? (g_NandPerfLog
.ReadPageTotalTime
/g_NandPerfLog
.ReadPageCount
): 0);
7301 SEQ_printf(m
, "Read subPage Count:%d, Read subPage totalTime:%lu, Avg. RPage:%lu\r\n",
7302 g_NandPerfLog
.ReadSubPageCount
,g_NandPerfLog
.ReadSubPageTotalTime
,
7303 g_NandPerfLog
.ReadSubPageCount
? (g_NandPerfLog
.ReadSubPageTotalTime
/g_NandPerfLog
.ReadSubPageCount
): 0);
7305 SEQ_printf(m
, "Read Busy Count:%d, Read Busy totalTime:%lu, Avg. R Busy:%lu\r\n",
7306 g_NandPerfLog
.ReadBusyCount
,g_NandPerfLog
.ReadBusyTotalTime
,
7307 g_NandPerfLog
.ReadBusyCount
? (g_NandPerfLog
.ReadBusyTotalTime
/g_NandPerfLog
.ReadBusyCount
): 0);
7309 SEQ_printf(m
, "Read DMA Count:%d, Read DMA totalTime:%lu, Avg. R DMA:%lu\r\n",
7310 g_NandPerfLog
.ReadDMACount
,g_NandPerfLog
.ReadDMATotalTime
,
7311 g_NandPerfLog
.ReadDMACount
? (g_NandPerfLog
.ReadDMATotalTime
/g_NandPerfLog
.ReadDMACount
): 0);
7313 SEQ_printf(m
, "Write Page Count:%d, Write Page totalTime:%lu, Avg. WPage:%lu\r\n",
7314 g_NandPerfLog
.WritePageCount
,g_NandPerfLog
.WritePageTotalTime
,
7315 g_NandPerfLog
.WritePageCount
? (g_NandPerfLog
.WritePageTotalTime
/g_NandPerfLog
.WritePageCount
): 0);
7317 SEQ_printf(m
, "Write Busy Count:%d, Write Busy totalTime:%lu, Avg. W Busy:%lu\r\n",
7318 g_NandPerfLog
.WriteBusyCount
,g_NandPerfLog
.WriteBusyTotalTime
,
7319 g_NandPerfLog
.WriteBusyCount
? (g_NandPerfLog
.WriteBusyTotalTime
/g_NandPerfLog
.WriteBusyCount
): 0);
7321 SEQ_printf(m
, "Write DMA Count:%d, Write DMA totalTime:%lu, Avg. W DMA:%lu\r\n",
7322 g_NandPerfLog
.WriteDMACount
,g_NandPerfLog
.WriteDMATotalTime
,
7323 g_NandPerfLog
.WriteDMACount
? (g_NandPerfLog
.WriteDMATotalTime
/g_NandPerfLog
.WriteDMACount
): 0);
7325 SEQ_printf(m
, "EraseBlock Count:%d, EraseBlock totalTime:%lu, Avg. Erase:%lu\r\n",
7326 g_NandPerfLog
.EraseBlockCount
,g_NandPerfLog
.EraseBlockTotalTime
,
7327 g_NandPerfLog
.EraseBlockCount
? (g_NandPerfLog
.EraseBlockTotalTime
/g_NandPerfLog
.EraseBlockCount
): 0);
7334 static int mt_nand_proc_open(struct inode
*inode
, struct file
*file
)
7336 return single_open(file
, mtk_nand_proc_show
, inode
->i_private
);
7340 static const struct file_operations mtk_nand_fops
= {
7341 .open
= mt_nand_proc_open
,
7342 .write
= mtk_nand_proc_write
,
7344 .llseek
= seq_lseek
,
7345 .release
= single_release
,
7347 static int __init
mtk_nand_init(void)
7349 struct proc_dir_entry
*entry
;
7352 #if defined(NAND_OTP_SUPPORT)
7354 MSG(OTP
, "OTP: register NAND OTP device ...\n");
7355 err
= misc_register(&nand_otp_dev
);
7358 MSG(OTP
, "OTP: failed to register NAND OTP device!\n");
7361 spin_lock_init(&g_OTPLock
);
7364 #if (defined(NAND_OTP_SUPPORT) && SAMSUNG_OTP_SUPPORT)
7365 g_mtk_otp_fuc
.OTPQueryLength
= samsung_OTPQueryLength
;
7366 g_mtk_otp_fuc
.OTPRead
= samsung_OTPRead
;
7367 g_mtk_otp_fuc
.OTPWrite
= samsung_OTPWrite
;
7370 entry
= proc_create(PROCNAME
, 0664, NULL
, &mtk_nand_fops
);
7371 #if 0//removed in kernel 3.10
7372 entry
= create_proc_entry(PROCNAME
, 0664, NULL
);
7375 MSG(INIT
, "MTK Nand : unable to create /proc entry\n");
7378 entry
->read_proc
= mtk_nand_proc_read
;
7379 entry
->write_proc
= mtk_nand_proc_write
;
7382 //printk("MediaTek Nand driver init, version %s\n", VERSION);
7384 return platform_driver_register(&mtk_nand_driver
);
7387 /******************************************************************************
7391 * Free the device driver !
7402 ******************************************************************************/
7403 static void __exit
mtk_nand_exit(void)
7405 MSG(INIT
, "MediaTek Nand driver exit, version %s\n", VERSION
);
7406 #if defined(NAND_OTP_SUPPORT)
7407 misc_deregister(&nand_otp_dev
);
7410 #ifdef SAMSUNG_OTP_SUPPORT
7411 g_mtk_otp_fuc
.OTPQueryLength
= NULL
;
7412 g_mtk_otp_fuc
.OTPRead
= NULL
;
7413 g_mtk_otp_fuc
.OTPWrite
= NULL
;
7416 platform_driver_unregister(&mtk_nand_driver
);
7417 remove_proc_entry(PROCNAME
, NULL
);
7420 module_init(mtk_nand_init
);
7421 module_exit(mtk_nand_exit
);
7422 MODULE_LICENSE("GPL");