2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 #include <linux/slab.h>
31 #define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
32 #define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
33 DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
35 #define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
36 BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
38 #define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
40 #define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
41 BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
43 #define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
46 void debug_boundary_lineno_error(int chnl
, int limit
, int no
,
47 int lineno
, char *filename
)
50 printk(KERN_ERR
"Boundary Check Fail value %d >= limit %d, "
51 "at %s:%d. Other info:%d. Aborting...\n",
52 chnl
, limit
, filename
, lineno
, no
);
54 /* static int globalmemsize; */
57 static u16
FTL_Cache_If_Hit(u64 dwPageAddr
);
58 static int FTL_Cache_Read(u64 dwPageAddr
);
59 static void FTL_Cache_Read_Page(u8
*pData
, u64 dwPageAddr
,
61 static void FTL_Cache_Write_Page(u8
*pData
, u64 dwPageAddr
,
62 u8 cache_blk
, u16 flag
);
63 static int FTL_Cache_Write(void);
64 static int FTL_Cache_Write_Back(u8
*pData
, u64 blk_addr
);
65 static void FTL_Calculate_LRU(void);
66 static u32
FTL_Get_Block_Index(u32 wBlockNum
);
68 static int FTL_Search_Block_Table_IN_Block(u32 BT_Block
,
69 u8 BT_Tag
, u16
*Page
);
70 static int FTL_Read_Block_Table(void);
71 static int FTL_Write_Block_Table(int wForce
);
72 static int FTL_Write_Block_Table_Data(void);
73 static int FTL_Check_Block_Table(int wOldTable
);
74 static int FTL_Static_Wear_Leveling(void);
75 static u32
FTL_Replace_Block_Table(void);
76 static int FTL_Write_IN_Progress_Block_Table_Page(void);
78 static u32
FTL_Get_Page_Num(u64 length
);
79 static u64
FTL_Get_Physical_Block_Addr(u64 blk_addr
);
81 static u32
FTL_Replace_OneBlock(u32 wBlockNum
,
83 static u32
FTL_Replace_LWBlock(u32 wBlockNum
,
84 int *pGarbageCollect
);
85 static u32
FTL_Replace_MWBlock(void);
86 static int FTL_Replace_Block(u64 blk_addr
);
87 static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX
);
89 static int FTL_Flash_Error_Handle(u8
*pData
, u64 old_page_addr
, u64 blk_addr
);
91 struct device_info_tag DeviceInfo
;
92 struct flash_cache_tag Cache
;
93 static struct spectra_l2_cache_info cache_l2
;
95 static u8
*cache_l2_page_buf
;
96 static u8
*cache_l2_blk_buf
;
102 static u16 g_wBlockTableOffset
;
103 static u32 g_wBlockTableIndex
;
104 static u8 g_cBlockTableStatus
;
106 static u8
*g_pTempBuf
;
107 static u8
*flag_check_blk_table
;
108 static u8
*tmp_buf_search_bt_in_block
;
109 static u8
*spare_buf_search_bt_in_block
;
110 static u8
*spare_buf_bt_search_bt_in_block
;
111 static u8
*tmp_buf1_read_blk_table
;
112 static u8
*tmp_buf2_read_blk_table
;
113 static u8
*flags_static_wear_leveling
;
114 static u8
*tmp_buf_write_blk_table_data
;
115 static u8
*tmp_buf_read_disturbance
;
117 u8
*buf_read_page_main_spare
;
118 u8
*buf_write_page_main_spare
;
119 u8
*buf_read_page_spare
;
120 u8
*buf_get_bad_block
;
122 #if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
123 struct flash_cache_delta_list_tag int_cache
[MAX_CHANS
+ MAX_DESCS
];
124 struct flash_cache_tag cache_start_copy
;
127 int g_wNumFreeBlocks
;
131 static u8 bt_flag
= FIRST_BT_ID
;
132 static u8 bt_block_changed
;
134 static u16 cache_block_to_write
;
135 static u8 last_erased
= FIRST_BT_ID
;
138 static u8 BT_GC_Called
;
141 #define COPY_BACK_BUF_NUM 10
143 static u8 ftl_cmd_cnt
; /* Init value is 0 */
146 u8
*g_pBTStartingCopy
;
147 u8
*g_pWearCounterCopy
;
148 u16
*g_pReadCounterCopy
;
149 u8
*g_pBlockTableCopies
;
150 u8
*g_pNextBlockTable
;
151 static u8
*cp_back_buf_copies
[COPY_BACK_BUF_NUM
];
152 static int cp_back_buf_idx
;
154 static u8
*g_temp_buf
;
156 #pragma pack(push, 1)
158 struct BTableChangesDelta
{
161 u16 g_wBlockTableOffset
;
162 u32 g_wBlockTableIndex
;
173 struct BTableChangesDelta
*p_BTableChangesDelta
;
177 #define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
178 #define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
180 #define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
182 #define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
184 #define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
186 #if SUPPORT_LARGE_BLOCKNUM
187 #define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
190 #define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
193 #define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
194 FTL_Get_WearCounter_Table_Mem_Size_Bytes
195 #define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
196 FTL_Get_ReadCounter_Table_Mem_Size_Bytes
198 static u32
FTL_Get_Block_Table_Flash_Size_Bytes(void)
202 if (DeviceInfo
.MLCDevice
) {
203 byte_num
= FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
204 DeviceInfo
.wDataBlockNum
* sizeof(u8
) +
205 DeviceInfo
.wDataBlockNum
* sizeof(u16
);
207 byte_num
= FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
208 DeviceInfo
.wDataBlockNum
* sizeof(u8
);
211 byte_num
+= 4 * sizeof(u8
);
216 static u16
FTL_Get_Block_Table_Flash_Size_Pages(void)
218 return (u16
)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
221 static int FTL_Copy_Block_Table_To_Flash(u8
*flashBuf
, u32 sizeToTx
,
224 u32 wBytesCopied
, blk_tbl_size
, wBytes
;
225 u32
*pbt
= (u32
*)g_pBlockTable
;
227 blk_tbl_size
= FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
229 (wBytes
< sizeToTx
) && ((wBytes
+ sizeTxed
) < blk_tbl_size
);
231 #if SUPPORT_LARGE_BLOCKNUM
232 flashBuf
[wBytes
] = (u8
)(pbt
[(wBytes
+ sizeTxed
) / 3]
233 >> (((wBytes
+ sizeTxed
) % 3) ?
234 ((((wBytes
+ sizeTxed
) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
236 flashBuf
[wBytes
] = (u8
)(pbt
[(wBytes
+ sizeTxed
) / 2]
237 >> (((wBytes
+ sizeTxed
) % 2) ? 0 : 8)) & 0xFF;
241 sizeTxed
= (sizeTxed
> blk_tbl_size
) ? (sizeTxed
- blk_tbl_size
) : 0;
242 blk_tbl_size
= FTL_Get_WearCounter_Table_Flash_Size_Bytes();
243 wBytesCopied
= wBytes
;
244 wBytes
= ((blk_tbl_size
- sizeTxed
) > (sizeToTx
- wBytesCopied
)) ?
245 (sizeToTx
- wBytesCopied
) : (blk_tbl_size
- sizeTxed
);
246 memcpy(flashBuf
+ wBytesCopied
, g_pWearCounter
+ sizeTxed
, wBytes
);
248 sizeTxed
= (sizeTxed
> blk_tbl_size
) ? (sizeTxed
- blk_tbl_size
) : 0;
250 if (DeviceInfo
.MLCDevice
) {
251 blk_tbl_size
= FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
252 wBytesCopied
+= wBytes
;
253 for (wBytes
= 0; ((wBytes
+ wBytesCopied
) < sizeToTx
) &&
254 ((wBytes
+ sizeTxed
) < blk_tbl_size
); wBytes
++)
255 flashBuf
[wBytes
+ wBytesCopied
] =
256 (g_pReadCounter
[(wBytes
+ sizeTxed
) / 2] >>
257 (((wBytes
+ sizeTxed
) % 2) ? 0 : 8)) & 0xFF;
260 return wBytesCopied
+ wBytes
;
263 static int FTL_Copy_Block_Table_From_Flash(u8
*flashBuf
,
264 u32 sizeToTx
, u32 sizeTxed
)
266 u32 wBytesCopied
, blk_tbl_size
, wBytes
;
267 u32
*pbt
= (u32
*)g_pBlockTable
;
269 blk_tbl_size
= FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
270 for (wBytes
= 0; (wBytes
< sizeToTx
) &&
271 ((wBytes
+ sizeTxed
) < blk_tbl_size
); wBytes
++) {
272 #if SUPPORT_LARGE_BLOCKNUM
273 if (!((wBytes
+ sizeTxed
) % 3))
274 pbt
[(wBytes
+ sizeTxed
) / 3] = 0;
275 pbt
[(wBytes
+ sizeTxed
) / 3] |=
276 (flashBuf
[wBytes
] << (((wBytes
+ sizeTxed
) % 3) ?
277 ((((wBytes
+ sizeTxed
) % 3) == 2) ? 0 : 8) : 16));
279 if (!((wBytes
+ sizeTxed
) % 2))
280 pbt
[(wBytes
+ sizeTxed
) / 2] = 0;
281 pbt
[(wBytes
+ sizeTxed
) / 2] |=
282 (flashBuf
[wBytes
] << (((wBytes
+ sizeTxed
) % 2) ?
287 sizeTxed
= (sizeTxed
> blk_tbl_size
) ? (sizeTxed
- blk_tbl_size
) : 0;
288 blk_tbl_size
= FTL_Get_WearCounter_Table_Flash_Size_Bytes();
289 wBytesCopied
= wBytes
;
290 wBytes
= ((blk_tbl_size
- sizeTxed
) > (sizeToTx
- wBytesCopied
)) ?
291 (sizeToTx
- wBytesCopied
) : (blk_tbl_size
- sizeTxed
);
292 memcpy(g_pWearCounter
+ sizeTxed
, flashBuf
+ wBytesCopied
, wBytes
);
293 sizeTxed
= (sizeTxed
> blk_tbl_size
) ? (sizeTxed
- blk_tbl_size
) : 0;
295 if (DeviceInfo
.MLCDevice
) {
296 wBytesCopied
+= wBytes
;
297 blk_tbl_size
= FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
298 for (wBytes
= 0; ((wBytes
+ wBytesCopied
) < sizeToTx
) &&
299 ((wBytes
+ sizeTxed
) < blk_tbl_size
); wBytes
++) {
300 if (((wBytes
+ sizeTxed
) % 2))
301 g_pReadCounter
[(wBytes
+ sizeTxed
) / 2] = 0;
302 g_pReadCounter
[(wBytes
+ sizeTxed
) / 2] |=
304 (((wBytes
+ sizeTxed
) % 2) ? 0 : 8));
308 return wBytesCopied
+wBytes
;
311 static int FTL_Insert_Block_Table_Signature(u8
*buf
, u8 tag
)
315 for (i
= 0; i
< BTSIG_BYTES
; i
++)
316 buf
[BTSIG_OFFSET
+ i
] =
317 ((tag
+ (i
* BTSIG_DELTA
) - FIRST_BT_ID
) %
318 (1 + LAST_BT_ID
-FIRST_BT_ID
)) + FIRST_BT_ID
;
323 static int FTL_Extract_Block_Table_Tag(u8
*buf
, u8
**tagarray
)
325 static u8 tag
[BTSIG_BYTES
>> 1];
326 int i
, j
, k
, tagi
, tagtemp
, status
;
328 *tagarray
= (u8
*)tag
;
331 for (i
= 0; i
< (BTSIG_BYTES
- 1); i
++) {
332 for (j
= i
+ 1; (j
< BTSIG_BYTES
) &&
333 (tagi
< (BTSIG_BYTES
>> 1)); j
++) {
334 tagtemp
= buf
[BTSIG_OFFSET
+ j
] -
335 buf
[BTSIG_OFFSET
+ i
];
336 if (tagtemp
&& !(tagtemp
% BTSIG_DELTA
)) {
337 tagtemp
= (buf
[BTSIG_OFFSET
+ i
] +
338 (1 + LAST_BT_ID
- FIRST_BT_ID
) -
340 (1 + LAST_BT_ID
- FIRST_BT_ID
);
342 for (k
= 0; k
< tagi
; k
++) {
343 if (tagtemp
== tag
[k
])
347 if (status
== FAIL
) {
348 tag
[tagi
++] = tagtemp
;
349 i
= (j
== (i
+ 1)) ? i
+ 1 : i
;
350 j
= (j
== (i
+ 1)) ? i
+ 1 : i
;
360 static int FTL_Execute_SPL_Recovery(void)
363 u32
*pbt
= (u32
*)g_pBlockTable
;
366 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
367 __FILE__
, __LINE__
, __func__
);
369 blks
= DeviceInfo
.wSpectraEndBlock
- DeviceInfo
.wSpectraStartBlock
;
370 for (j
= 0; j
<= blks
; j
++) {
372 if (((block
& BAD_BLOCK
) != BAD_BLOCK
) &&
373 ((block
& SPARE_BLOCK
) == SPARE_BLOCK
)) {
374 ret
= GLOB_LLD_Erase_Block(block
& ~BAD_BLOCK
);
376 nand_dbg_print(NAND_DBG_WARN
,
377 "NAND Program fail in %s, Line %d, "
378 "Function: %s, new Bad Block %d "
380 __FILE__
, __LINE__
, __func__
,
381 (int)(block
& ~BAD_BLOCK
));
382 MARK_BLOCK_AS_BAD(pbt
[j
]);
390 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
391 * Function: GLOB_FTL_IdentifyDevice
392 * Inputs: pointer to identify data structure
393 * Outputs: PASS / FAIL
394 * Description: the identify data structure is filled in with
395 * information for the block driver.
396 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
397 int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag
*dev_data
)
399 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
400 __FILE__
, __LINE__
, __func__
);
402 dev_data
->NumBlocks
= DeviceInfo
.wTotalBlocks
;
403 dev_data
->PagesPerBlock
= DeviceInfo
.wPagesPerBlock
;
404 dev_data
->PageDataSize
= DeviceInfo
.wPageDataSize
;
405 dev_data
->wECCBytesPerSector
= DeviceInfo
.wECCBytesPerSector
;
406 dev_data
->wDataBlockNum
= DeviceInfo
.wDataBlockNum
;
412 static int allocate_memory(void)
414 u32 block_table_size
, page_size
, block_size
, mem_size
;
421 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
422 __FILE__
, __LINE__
, __func__
);
424 page_size
= DeviceInfo
.wPageSize
;
425 block_size
= DeviceInfo
.wPagesPerBlock
* DeviceInfo
.wPageDataSize
;
427 block_table_size
= DeviceInfo
.wDataBlockNum
*
428 (sizeof(u32
) + sizeof(u8
) + sizeof(u16
));
429 block_table_size
+= (DeviceInfo
.wPageDataSize
-
430 (block_table_size
% DeviceInfo
.wPageDataSize
)) %
431 DeviceInfo
.wPageDataSize
;
433 /* Malloc memory for block tables */
434 g_pBlockTable
= kmalloc(block_table_size
, GFP_ATOMIC
);
436 goto block_table_fail
;
437 memset(g_pBlockTable
, 0, block_table_size
);
438 total_bytes
+= block_table_size
;
440 g_pWearCounter
= (u8
*)(g_pBlockTable
+
441 DeviceInfo
.wDataBlockNum
* sizeof(u32
));
443 if (DeviceInfo
.MLCDevice
)
444 g_pReadCounter
= (u16
*)(g_pBlockTable
+
445 DeviceInfo
.wDataBlockNum
*
446 (sizeof(u32
) + sizeof(u8
)));
448 /* Malloc memory and init for cache items */
449 for (i
= 0; i
< CACHE_ITEM_NUM
; i
++) {
450 Cache
.array
[i
].address
= NAND_CACHE_INIT_ADDR
;
451 Cache
.array
[i
].use_cnt
= 0;
452 Cache
.array
[i
].changed
= CLEAR
;
453 Cache
.array
[i
].buf
= kmalloc(Cache
.cache_item_size
,
455 if (!Cache
.array
[i
].buf
)
456 goto cache_item_fail
;
457 memset(Cache
.array
[i
].buf
, 0, Cache
.cache_item_size
);
458 total_bytes
+= Cache
.cache_item_size
;
461 /* Malloc memory for IPF */
462 g_pIPF
= kmalloc(page_size
, GFP_ATOMIC
);
465 memset(g_pIPF
, 0, page_size
);
466 total_bytes
+= page_size
;
468 /* Malloc memory for data merging during Level2 Cache flush */
469 cache_l2_page_buf
= kmalloc(page_size
, GFP_ATOMIC
);
470 if (!cache_l2_page_buf
)
471 goto cache_l2_page_buf_fail
;
472 memset(cache_l2_page_buf
, 0xff, page_size
);
473 total_bytes
+= page_size
;
475 cache_l2_blk_buf
= kmalloc(block_size
, GFP_ATOMIC
);
476 if (!cache_l2_blk_buf
)
477 goto cache_l2_blk_buf_fail
;
478 memset(cache_l2_blk_buf
, 0xff, block_size
);
479 total_bytes
+= block_size
;
481 /* Malloc memory for temp buffer */
482 g_pTempBuf
= kmalloc(Cache
.cache_item_size
, GFP_ATOMIC
);
485 memset(g_pTempBuf
, 0, Cache
.cache_item_size
);
486 total_bytes
+= Cache
.cache_item_size
;
488 /* Malloc memory for block table blocks */
489 mem_size
= (1 + LAST_BT_ID
- FIRST_BT_ID
) * sizeof(u32
);
490 g_pBTBlocks
= kmalloc(mem_size
, GFP_ATOMIC
);
493 memset(g_pBTBlocks
, 0xff, mem_size
);
494 total_bytes
+= mem_size
;
496 /* Malloc memory for function FTL_Check_Block_Table */
497 flag_check_blk_table
= kmalloc(DeviceInfo
.wDataBlockNum
, GFP_ATOMIC
);
498 if (!flag_check_blk_table
)
499 goto flag_check_blk_table_fail
;
500 total_bytes
+= DeviceInfo
.wDataBlockNum
;
502 /* Malloc memory for function FTL_Search_Block_Table_IN_Block */
503 tmp_buf_search_bt_in_block
= kmalloc(page_size
, GFP_ATOMIC
);
504 if (!tmp_buf_search_bt_in_block
)
505 goto tmp_buf_search_bt_in_block_fail
;
506 memset(tmp_buf_search_bt_in_block
, 0xff, page_size
);
507 total_bytes
+= page_size
;
509 mem_size
= DeviceInfo
.wPageSize
- DeviceInfo
.wPageDataSize
;
510 spare_buf_search_bt_in_block
= kmalloc(mem_size
, GFP_ATOMIC
);
511 if (!spare_buf_search_bt_in_block
)
512 goto spare_buf_search_bt_in_block_fail
;
513 memset(spare_buf_search_bt_in_block
, 0xff, mem_size
);
514 total_bytes
+= mem_size
;
516 spare_buf_bt_search_bt_in_block
= kmalloc(mem_size
, GFP_ATOMIC
);
517 if (!spare_buf_bt_search_bt_in_block
)
518 goto spare_buf_bt_search_bt_in_block_fail
;
519 memset(spare_buf_bt_search_bt_in_block
, 0xff, mem_size
);
520 total_bytes
+= mem_size
;
522 /* Malloc memory for function FTL_Read_Block_Table */
523 tmp_buf1_read_blk_table
= kmalloc(page_size
, GFP_ATOMIC
);
524 if (!tmp_buf1_read_blk_table
)
525 goto tmp_buf1_read_blk_table_fail
;
526 memset(tmp_buf1_read_blk_table
, 0xff, page_size
);
527 total_bytes
+= page_size
;
529 tmp_buf2_read_blk_table
= kmalloc(page_size
, GFP_ATOMIC
);
530 if (!tmp_buf2_read_blk_table
)
531 goto tmp_buf2_read_blk_table_fail
;
532 memset(tmp_buf2_read_blk_table
, 0xff, page_size
);
533 total_bytes
+= page_size
;
535 /* Malloc memory for function FTL_Static_Wear_Leveling */
536 flags_static_wear_leveling
= kmalloc(DeviceInfo
.wDataBlockNum
,
538 if (!flags_static_wear_leveling
)
539 goto flags_static_wear_leveling_fail
;
540 total_bytes
+= DeviceInfo
.wDataBlockNum
;
542 /* Malloc memory for function FTL_Write_Block_Table_Data */
543 if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
544 mem_size
= FTL_Get_Block_Table_Flash_Size_Bytes() -
545 2 * DeviceInfo
.wPageSize
;
547 mem_size
= DeviceInfo
.wPageSize
;
548 tmp_buf_write_blk_table_data
= kmalloc(mem_size
, GFP_ATOMIC
);
549 if (!tmp_buf_write_blk_table_data
)
550 goto tmp_buf_write_blk_table_data_fail
;
551 memset(tmp_buf_write_blk_table_data
, 0xff, mem_size
);
552 total_bytes
+= mem_size
;
554 /* Malloc memory for function FTL_Read_Disturbance */
555 tmp_buf_read_disturbance
= kmalloc(block_size
, GFP_ATOMIC
);
556 if (!tmp_buf_read_disturbance
)
557 goto tmp_buf_read_disturbance_fail
;
558 memset(tmp_buf_read_disturbance
, 0xff, block_size
);
559 total_bytes
+= block_size
;
561 /* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
562 buf_read_page_main_spare
= kmalloc(DeviceInfo
.wPageSize
, GFP_ATOMIC
);
563 if (!buf_read_page_main_spare
)
564 goto buf_read_page_main_spare_fail
;
565 total_bytes
+= DeviceInfo
.wPageSize
;
567 /* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
568 buf_write_page_main_spare
= kmalloc(DeviceInfo
.wPageSize
, GFP_ATOMIC
);
569 if (!buf_write_page_main_spare
)
570 goto buf_write_page_main_spare_fail
;
571 total_bytes
+= DeviceInfo
.wPageSize
;
573 /* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
574 buf_read_page_spare
= kmalloc(DeviceInfo
.wPageSpareSize
, GFP_ATOMIC
);
575 if (!buf_read_page_spare
)
576 goto buf_read_page_spare_fail
;
577 memset(buf_read_page_spare
, 0xff, DeviceInfo
.wPageSpareSize
);
578 total_bytes
+= DeviceInfo
.wPageSpareSize
;
580 /* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
581 buf_get_bad_block
= kmalloc(DeviceInfo
.wPageSpareSize
, GFP_ATOMIC
);
582 if (!buf_get_bad_block
)
583 goto buf_get_bad_block_fail
;
584 memset(buf_get_bad_block
, 0xff, DeviceInfo
.wPageSpareSize
);
585 total_bytes
+= DeviceInfo
.wPageSpareSize
;
588 g_temp_buf
= kmalloc(block_size
, GFP_ATOMIC
);
591 memset(g_temp_buf
, 0xff, block_size
);
592 total_bytes
+= block_size
;
594 /* Malloc memory for copy of block table used in CDMA mode */
595 g_pBTStartingCopy
= kmalloc(block_table_size
, GFP_ATOMIC
);
596 if (!g_pBTStartingCopy
)
597 goto bt_starting_copy
;
598 memset(g_pBTStartingCopy
, 0, block_table_size
);
599 total_bytes
+= block_table_size
;
601 g_pWearCounterCopy
= (u8
*)(g_pBTStartingCopy
+
602 DeviceInfo
.wDataBlockNum
* sizeof(u32
));
604 if (DeviceInfo
.MLCDevice
)
605 g_pReadCounterCopy
= (u16
*)(g_pBTStartingCopy
+
606 DeviceInfo
.wDataBlockNum
*
607 (sizeof(u32
) + sizeof(u8
)));
609 /* Malloc memory for block table copies */
610 mem_size
= 5 * DeviceInfo
.wDataBlockNum
* sizeof(u32
) +
611 5 * DeviceInfo
.wDataBlockNum
* sizeof(u8
);
612 if (DeviceInfo
.MLCDevice
)
613 mem_size
+= 5 * DeviceInfo
.wDataBlockNum
* sizeof(u16
);
614 g_pBlockTableCopies
= kmalloc(mem_size
, GFP_ATOMIC
);
615 if (!g_pBlockTableCopies
)
616 goto blk_table_copies_fail
;
617 memset(g_pBlockTableCopies
, 0, mem_size
);
618 total_bytes
+= mem_size
;
619 g_pNextBlockTable
= g_pBlockTableCopies
;
621 /* Malloc memory for Block Table Delta */
622 mem_size
= MAX_DESCS
* sizeof(struct BTableChangesDelta
);
623 g_pBTDelta
= kmalloc(mem_size
, GFP_ATOMIC
);
626 memset(g_pBTDelta
, 0, mem_size
);
627 total_bytes
+= mem_size
;
628 g_pBTDelta_Free
= g_pBTDelta
;
630 /* Malloc memory for Copy Back Buffers */
631 for (j
= 0; j
< COPY_BACK_BUF_NUM
; j
++) {
632 cp_back_buf_copies
[j
] = kmalloc(block_size
, GFP_ATOMIC
);
633 if (!cp_back_buf_copies
[j
])
634 goto cp_back_buf_copies_fail
;
635 memset(cp_back_buf_copies
[j
], 0, block_size
);
636 total_bytes
+= block_size
;
640 /* Malloc memory for pending commands list */
641 mem_size
= sizeof(struct pending_cmd
) * MAX_DESCS
;
642 info
.pcmds
= kzalloc(mem_size
, GFP_KERNEL
);
644 goto pending_cmds_buf_fail
;
645 total_bytes
+= mem_size
;
647 /* Malloc memory for CDMA descripter table */
648 mem_size
= sizeof(struct cdma_descriptor
) * MAX_DESCS
;
649 info
.cdma_desc_buf
= kzalloc(mem_size
, GFP_KERNEL
);
650 if (!info
.cdma_desc_buf
)
651 goto cdma_desc_buf_fail
;
652 total_bytes
+= mem_size
;
654 /* Malloc memory for Memcpy descripter table */
655 mem_size
= sizeof(struct memcpy_descriptor
) * MAX_DESCS
;
656 info
.memcp_desc_buf
= kzalloc(mem_size
, GFP_KERNEL
);
657 if (!info
.memcp_desc_buf
)
658 goto memcp_desc_buf_fail
;
659 total_bytes
+= mem_size
;
662 nand_dbg_print(NAND_DBG_WARN
,
663 "Total memory allocated in FTL layer: %d\n", total_bytes
);
669 kfree(info
.cdma_desc_buf
);
672 pending_cmds_buf_fail
:
673 cp_back_buf_copies_fail
:
676 kfree(cp_back_buf_copies
[j
]);
679 kfree(g_pBlockTableCopies
);
680 blk_table_copies_fail
:
681 kfree(g_pBTStartingCopy
);
685 kfree(buf_get_bad_block
);
688 buf_get_bad_block_fail
:
689 kfree(buf_read_page_spare
);
690 buf_read_page_spare_fail
:
691 kfree(buf_write_page_main_spare
);
692 buf_write_page_main_spare_fail
:
693 kfree(buf_read_page_main_spare
);
694 buf_read_page_main_spare_fail
:
695 kfree(tmp_buf_read_disturbance
);
696 tmp_buf_read_disturbance_fail
:
697 kfree(tmp_buf_write_blk_table_data
);
698 tmp_buf_write_blk_table_data_fail
:
699 kfree(flags_static_wear_leveling
);
700 flags_static_wear_leveling_fail
:
701 kfree(tmp_buf2_read_blk_table
);
702 tmp_buf2_read_blk_table_fail
:
703 kfree(tmp_buf1_read_blk_table
);
704 tmp_buf1_read_blk_table_fail
:
705 kfree(spare_buf_bt_search_bt_in_block
);
706 spare_buf_bt_search_bt_in_block_fail
:
707 kfree(spare_buf_search_bt_in_block
);
708 spare_buf_search_bt_in_block_fail
:
709 kfree(tmp_buf_search_bt_in_block
);
710 tmp_buf_search_bt_in_block_fail
:
711 kfree(flag_check_blk_table
);
712 flag_check_blk_table_fail
:
717 kfree(cache_l2_blk_buf
);
718 cache_l2_blk_buf_fail
:
719 kfree(cache_l2_page_buf
);
720 cache_l2_page_buf_fail
:
726 kfree(Cache
.array
[i
].buf
);
727 kfree(g_pBlockTable
);
729 printk(KERN_ERR
"Failed to kmalloc memory in %s Line %d.\n",
736 static int free_memory(void)
741 kfree(info
.memcp_desc_buf
);
742 kfree(info
.cdma_desc_buf
);
744 for (i
= COPY_BACK_BUF_NUM
- 1; i
>= 0; i
--)
745 kfree(cp_back_buf_copies
[i
]);
747 kfree(g_pBlockTableCopies
);
748 kfree(g_pBTStartingCopy
);
750 kfree(buf_get_bad_block
);
752 kfree(buf_read_page_spare
);
753 kfree(buf_write_page_main_spare
);
754 kfree(buf_read_page_main_spare
);
755 kfree(tmp_buf_read_disturbance
);
756 kfree(tmp_buf_write_blk_table_data
);
757 kfree(flags_static_wear_leveling
);
758 kfree(tmp_buf2_read_blk_table
);
759 kfree(tmp_buf1_read_blk_table
);
760 kfree(spare_buf_bt_search_bt_in_block
);
761 kfree(spare_buf_search_bt_in_block
);
762 kfree(tmp_buf_search_bt_in_block
);
763 kfree(flag_check_blk_table
);
767 for (i
= CACHE_ITEM_NUM
- 1; i
>= 0; i
--)
768 kfree(Cache
.array
[i
].buf
);
769 kfree(g_pBlockTable
);
774 static void dump_cache_l2_table(void)
777 struct spectra_l2_cache_list
*pnd
;
781 list_for_each(p
, &cache_l2
.table
.list
) {
782 pnd
= list_entry(p
, struct spectra_l2_cache_list
, list
);
783 nand_dbg_print(NAND_DBG_WARN
, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n
, pnd
->logical_blk_num
);
785 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
786 if (pnd->pages_array[i] != MAX_U32_VALUE)
787 nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
794 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
795 * Function: GLOB_FTL_Init
797 * Outputs: PASS=0 / FAIL=1
798 * Description: allocates the memory for cache array,
799 * important data structures
800 * clears the cache array
801 * reads the block table from flash into array
802 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
803 int GLOB_FTL_Init(void)
807 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
808 __FILE__
, __LINE__
, __func__
);
810 Cache
.pages_per_item
= 1;
811 Cache
.cache_item_size
= 1 * DeviceInfo
.wPageDataSize
;
813 if (allocate_memory() != PASS
)
817 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
818 memcpy((void *)&cache_start_copy
, (void *)&Cache
,
819 sizeof(struct flash_cache_tag
));
820 memset((void *)&int_cache
, -1,
821 sizeof(struct flash_cache_delta_list_tag
) *
822 (MAX_CHANS
+ MAX_DESCS
));
827 if (FTL_Read_Block_Table() != PASS
)
830 /* Init the Level2 Cache data structure */
831 for (i
= 0; i
< BLK_NUM_FOR_L2_CACHE
; i
++)
832 cache_l2
.blk_array
[i
] = MAX_U32_VALUE
;
833 cache_l2
.cur_blk_idx
= 0;
834 cache_l2
.cur_page_num
= 0;
835 INIT_LIST_HEAD(&cache_l2
.table
.list
);
836 cache_l2
.table
.logical_blk_num
= MAX_U32_VALUE
;
838 dump_cache_l2_table();
846 static void save_blk_table_changes(u16 idx
)
849 u32
*pbt
= (u32
*)g_pBTStartingCopy
;
851 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
855 id
= idx
- MAX_CHANS
;
856 if (int_cache
[id
].item
!= -1) {
857 cache_blks
= int_cache
[id
].item
;
858 cache_start_copy
.array
[cache_blks
].address
=
859 int_cache
[id
].cache
.address
;
860 cache_start_copy
.array
[cache_blks
].changed
=
861 int_cache
[id
].cache
.changed
;
865 ftl_cmd
= p_BTableChangesDelta
->ftl_cmd_cnt
;
867 while (ftl_cmd
<= PendingCMD
[idx
].Tag
) {
868 if (p_BTableChangesDelta
->ValidFields
== 0x01) {
869 g_wBlockTableOffset
=
870 p_BTableChangesDelta
->g_wBlockTableOffset
;
871 } else if (p_BTableChangesDelta
->ValidFields
== 0x0C) {
872 pbt
[p_BTableChangesDelta
->BT_Index
] =
873 p_BTableChangesDelta
->BT_Entry_Value
;
874 debug_boundary_error(((
875 p_BTableChangesDelta
->BT_Index
)),
876 DeviceInfo
.wDataBlockNum
, 0);
877 } else if (p_BTableChangesDelta
->ValidFields
== 0x03) {
878 g_wBlockTableOffset
=
879 p_BTableChangesDelta
->g_wBlockTableOffset
;
881 p_BTableChangesDelta
->g_wBlockTableIndex
;
882 } else if (p_BTableChangesDelta
->ValidFields
== 0x30) {
883 g_pWearCounterCopy
[p_BTableChangesDelta
->WC_Index
] =
884 p_BTableChangesDelta
->WC_Entry_Value
;
885 } else if ((DeviceInfo
.MLCDevice
) &&
886 (p_BTableChangesDelta
->ValidFields
== 0xC0)) {
887 g_pReadCounterCopy
[p_BTableChangesDelta
->RC_Index
] =
888 p_BTableChangesDelta
->RC_Entry_Value
;
889 nand_dbg_print(NAND_DBG_DEBUG
,
890 "In event status setting read counter "
891 "GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
893 p_BTableChangesDelta
->RC_Entry_Value
,
894 (unsigned int)p_BTableChangesDelta
->RC_Index
);
896 nand_dbg_print(NAND_DBG_DEBUG
,
897 "This should never occur \n");
899 p_BTableChangesDelta
+= 1;
900 ftl_cmd
= p_BTableChangesDelta
->ftl_cmd_cnt
;
904 static void discard_cmds(u16 n
)
906 u32
*pbt
= (u32
*)g_pBTStartingCopy
;
909 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
914 if ((PendingCMD
[n
].CMD
== WRITE_MAIN_CMD
) ||
915 (PendingCMD
[n
].CMD
== WRITE_MAIN_SPARE_CMD
)) {
916 for (k
= 0; k
< DeviceInfo
.wDataBlockNum
; k
++) {
917 if (PendingCMD
[n
].Block
== (pbt
[k
] & (~BAD_BLOCK
)))
918 MARK_BLK_AS_DISCARD(pbt
[k
]);
922 ftl_cmd
= p_BTableChangesDelta
->ftl_cmd_cnt
;
923 while (ftl_cmd
<= PendingCMD
[n
].Tag
) {
924 p_BTableChangesDelta
+= 1;
925 ftl_cmd
= p_BTableChangesDelta
->ftl_cmd_cnt
;
928 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
931 if (int_cache
[id
].item
!= -1) {
932 cache_blks
= int_cache
[id
].item
;
933 if (PendingCMD
[n
].CMD
== MEMCOPY_CMD
) {
934 if ((cache_start_copy
.array
[cache_blks
].buf
<=
935 PendingCMD
[n
].DataDestAddr
) &&
936 ((cache_start_copy
.array
[cache_blks
].buf
+
937 Cache
.cache_item_size
) >
938 PendingCMD
[n
].DataDestAddr
)) {
939 cache_start_copy
.array
[cache_blks
].address
=
940 NAND_CACHE_INIT_ADDR
;
941 cache_start_copy
.array
[cache_blks
].use_cnt
=
943 cache_start_copy
.array
[cache_blks
].changed
=
947 cache_start_copy
.array
[cache_blks
].address
=
948 int_cache
[id
].cache
.address
;
949 cache_start_copy
.array
[cache_blks
].changed
=
950 int_cache
[id
].cache
.changed
;
956 static void process_cmd_pass(int *first_failed_cmd
, u16 idx
)
958 if (0 == *first_failed_cmd
)
959 save_blk_table_changes(idx
);
964 static void process_cmd_fail_abort(int *first_failed_cmd
,
967 u32
*pbt
= (u32
*)g_pBTStartingCopy
;
970 int erase_fail
, program_fail
;
971 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
976 if (0 == *first_failed_cmd
)
977 *first_failed_cmd
= PendingCMD
[idx
].SBDCmdIndex
;
979 nand_dbg_print(NAND_DBG_DEBUG
, "Uncorrectable error has occured "
980 "while executing %u Command %u accesing Block %u\n",
981 (unsigned int)p_BTableChangesDelta
->ftl_cmd_cnt
,
983 (unsigned int)PendingCMD
[idx
].Block
);
985 ftl_cmd
= p_BTableChangesDelta
->ftl_cmd_cnt
;
986 while (ftl_cmd
<= PendingCMD
[idx
].Tag
) {
987 p_BTableChangesDelta
+= 1;
988 ftl_cmd
= p_BTableChangesDelta
->ftl_cmd_cnt
;
991 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
992 id
= idx
- MAX_CHANS
;
994 if (int_cache
[id
].item
!= -1) {
995 cache_blks
= int_cache
[id
].item
;
996 if ((PendingCMD
[idx
].CMD
== WRITE_MAIN_CMD
)) {
997 cache_start_copy
.array
[cache_blks
].address
=
998 int_cache
[id
].cache
.address
;
999 cache_start_copy
.array
[cache_blks
].changed
= SET
;
1000 } else if ((PendingCMD
[idx
].CMD
== READ_MAIN_CMD
)) {
1001 cache_start_copy
.array
[cache_blks
].address
=
1002 NAND_CACHE_INIT_ADDR
;
1003 cache_start_copy
.array
[cache_blks
].use_cnt
= 0;
1004 cache_start_copy
.array
[cache_blks
].changed
=
1006 } else if (PendingCMD
[idx
].CMD
== ERASE_CMD
) {
1008 } else if (PendingCMD
[idx
].CMD
== MEMCOPY_CMD
) {
1014 erase_fail
= (event
== EVENT_ERASE_FAILURE
) &&
1015 (PendingCMD
[idx
].CMD
== ERASE_CMD
);
1017 program_fail
= (event
== EVENT_PROGRAM_FAILURE
) &&
1018 ((PendingCMD
[idx
].CMD
== WRITE_MAIN_CMD
) ||
1019 (PendingCMD
[idx
].CMD
== WRITE_MAIN_SPARE_CMD
));
1021 if (erase_fail
|| program_fail
) {
1022 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
1023 if (PendingCMD
[idx
].Block
==
1024 (pbt
[i
] & (~BAD_BLOCK
)))
1025 MARK_BLOCK_AS_BAD(pbt
[i
]);
1030 static void process_cmd(int *first_failed_cmd
, u16 idx
, int event
)
1035 if (p_BTableChangesDelta
->ftl_cmd_cnt
== PendingCMD
[idx
].Tag
)
1038 if (PendingCMD
[idx
].Status
== CMD_PASS
) {
1039 process_cmd_pass(first_failed_cmd
, idx
);
1040 } else if ((PendingCMD
[idx
].Status
== CMD_FAIL
) ||
1041 (PendingCMD
[idx
].Status
== CMD_ABORT
)) {
1042 process_cmd_fail_abort(first_failed_cmd
, idx
, event
);
1043 } else if ((PendingCMD
[idx
].Status
== CMD_NOT_DONE
) &&
1044 PendingCMD
[idx
].Tag
) {
1045 nand_dbg_print(NAND_DBG_DEBUG
,
1046 " Command no. %hu is not executed\n",
1047 (unsigned int)PendingCMD
[idx
].Tag
);
1048 ftl_cmd
= p_BTableChangesDelta
->ftl_cmd_cnt
;
1049 while (ftl_cmd
<= PendingCMD
[idx
].Tag
) {
1050 p_BTableChangesDelta
+= 1;
1051 ftl_cmd
= p_BTableChangesDelta
->ftl_cmd_cnt
;
1057 static void process_cmd(int *first_failed_cmd
, u16 idx
, int event
)
1059 printk(KERN_ERR
"temporary workaround function. "
1060 "Should not be called! \n");
1063 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1064 * Function: GLOB_FTL_Event_Status
1066 * Outputs: Event Code
1067 * Description: It is called by SBD after hardware interrupt signalling
1068 * completion of commands chain
1069 * It does following things
1070 * get event status from LLD
1071 * analyze command chain status
1072 * determine last command executed
1074 * rebuild the block table in case of uncorrectable error
1076 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1077 int GLOB_FTL_Event_Status(int *first_failed_cmd
)
1079 int event_code
= PASS
;
1082 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1083 __FILE__
, __LINE__
, __func__
);
1085 *first_failed_cmd
= 0;
1087 event_code
= GLOB_LLD_Event_Status();
1089 switch (event_code
) {
1091 nand_dbg_print(NAND_DBG_DEBUG
, "Handling EVENT_PASS\n");
1093 case EVENT_UNCORRECTABLE_DATA_ERROR
:
1094 nand_dbg_print(NAND_DBG_DEBUG
, "Handling Uncorrectable ECC!\n");
1096 case EVENT_PROGRAM_FAILURE
:
1097 case EVENT_ERASE_FAILURE
:
1098 nand_dbg_print(NAND_DBG_WARN
, "Handling Ugly case. "
1099 "Event code: 0x%x\n", event_code
);
1100 p_BTableChangesDelta
=
1101 (struct BTableChangesDelta
*)g_pBTDelta
;
1102 for (i_P
= MAX_CHANS
; i_P
< (ftl_cmd_cnt
+ MAX_CHANS
);
1104 process_cmd(first_failed_cmd
, i_P
, event_code
);
1105 memcpy(g_pBlockTable
, g_pBTStartingCopy
,
1106 DeviceInfo
.wDataBlockNum
* sizeof(u32
));
1107 memcpy(g_pWearCounter
, g_pWearCounterCopy
,
1108 DeviceInfo
.wDataBlockNum
* sizeof(u8
));
1109 if (DeviceInfo
.MLCDevice
)
1110 memcpy(g_pReadCounter
, g_pReadCounterCopy
,
1111 DeviceInfo
.wDataBlockNum
* sizeof(u16
));
1113 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1114 memcpy((void *)&Cache
, (void *)&cache_start_copy
,
1115 sizeof(struct flash_cache_tag
));
1116 memset((void *)&int_cache
, -1,
1117 sizeof(struct flash_cache_delta_list_tag
) *
1118 (MAX_DESCS
+ MAX_CHANS
));
1122 nand_dbg_print(NAND_DBG_WARN
,
1123 "Handling unexpected event code - 0x%x\n",
1129 memcpy(g_pBTStartingCopy
, g_pBlockTable
,
1130 DeviceInfo
.wDataBlockNum
* sizeof(u32
));
1131 memcpy(g_pWearCounterCopy
, g_pWearCounter
,
1132 DeviceInfo
.wDataBlockNum
* sizeof(u8
));
1133 if (DeviceInfo
.MLCDevice
)
1134 memcpy(g_pReadCounterCopy
, g_pReadCounter
,
1135 DeviceInfo
.wDataBlockNum
* sizeof(u16
));
1137 g_pBTDelta_Free
= g_pBTDelta
;
1139 g_pNextBlockTable
= g_pBlockTableCopies
;
1140 cp_back_buf_idx
= 0;
1142 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1143 memcpy((void *)&cache_start_copy
, (void *)&Cache
,
1144 sizeof(struct flash_cache_tag
));
1145 memset((void *)&int_cache
, -1,
1146 sizeof(struct flash_cache_delta_list_tag
) *
1147 (MAX_DESCS
+ MAX_CHANS
));
1153 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1154 * Function: glob_ftl_execute_cmds
1157 * Description: pass thru to LLD
1158 ***************************************************************/
1159 u16
glob_ftl_execute_cmds(void)
1161 nand_dbg_print(NAND_DBG_TRACE
,
1162 "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
1163 (unsigned int)ftl_cmd_cnt
);
1165 return glob_lld_execute_cmds();
1171 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1172 * Function: GLOB_FTL_Read Immediate
1173 * Inputs: pointer to data
1175 * Outputs: PASS / FAIL
1176 * Description: Reads one page of data into RAM directly from flash without
1177 * using or disturbing cache.It is assumed this function is called
1178 * with CMD-DMA disabled.
1179 *****************************************************************/
1180 int GLOB_FTL_Read_Immediate(u8
*read_data
, u64 addr
)
1186 u32
*pbt
= (u32
*)g_pBlockTable
;
1188 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1189 __FILE__
, __LINE__
, __func__
);
1191 Block
= BLK_FROM_ADDR(addr
);
1192 Page
= PAGE_FROM_ADDR(addr
, Block
);
1194 if (!IS_SPARE_BLOCK(Block
))
1197 phy_blk
= pbt
[Block
];
1198 wResult
= GLOB_LLD_Read_Page_Main(read_data
, phy_blk
, Page
, 1);
1200 if (DeviceInfo
.MLCDevice
) {
1201 g_pReadCounter
[phy_blk
- DeviceInfo
.wSpectraStartBlock
]++;
1202 if (g_pReadCounter
[phy_blk
- DeviceInfo
.wSpectraStartBlock
]
1203 >= MAX_READ_COUNTER
)
1204 FTL_Read_Disturbance(phy_blk
);
1205 if (g_cBlockTableStatus
!= IN_PROGRESS_BLOCK_TABLE
) {
1206 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
1207 FTL_Write_IN_Progress_Block_Table_Page();
1215 #ifdef SUPPORT_BIG_ENDIAN
1216 /*********************************************************************
1217 * Function: FTL_Invert_Block_Table
1220 * Description: Re-format the block table in ram based on BIG_ENDIAN and
1221 * LARGE_BLOCKNUM if necessary
1222 **********************************************************************/
1223 static void FTL_Invert_Block_Table(void)
1226 u32
*pbt
= (u32
*)g_pBlockTable
;
1228 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1229 __FILE__
, __LINE__
, __func__
);
1231 #ifdef SUPPORT_LARGE_BLOCKNUM
1232 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
1233 pbt
[i
] = INVERTUINT32(pbt
[i
]);
1234 g_pWearCounter
[i
] = INVERTUINT32(g_pWearCounter
[i
]);
1237 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
1238 pbt
[i
] = INVERTUINT16(pbt
[i
]);
1239 g_pWearCounter
[i
] = INVERTUINT16(g_pWearCounter
[i
]);
1245 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1246 * Function: GLOB_FTL_Flash_Init
1248 * Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1249 * Description: The flash controller is initialized
1250 * The flash device is reset
1251 * Perform a flash READ ID command to confirm that a
1252 * valid device is attached and active.
1253 * The DeviceInfo structure gets filled in
1254 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1255 int GLOB_FTL_Flash_Init(void)
1259 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1260 __FILE__
, __LINE__
, __func__
);
1264 GLOB_LLD_Flash_Init();
1266 status
= GLOB_LLD_Read_Device_ID();
1271 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1273 * Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1274 * Description: The flash controller is released
1275 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1276 int GLOB_FTL_Flash_Release(void)
1278 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1279 __FILE__
, __LINE__
, __func__
);
1281 return GLOB_LLD_Flash_Release();
1285 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1286 * Function: GLOB_FTL_Cache_Release
1289 * Description: release all allocated memory in GLOB_FTL_Init
1290 * (allocated in GLOB_FTL_Init)
1291 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1292 void GLOB_FTL_Cache_Release(void)
1294 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1295 __FILE__
, __LINE__
, __func__
);
1300 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1301 * Function: FTL_Cache_If_Hit
1302 * Inputs: Page Address
1303 * Outputs: Block number/UNHIT BLOCK
1304 * Description: Determines if the addressed page is in cache
1305 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1306 static u16
FTL_Cache_If_Hit(u64 page_addr
)
1312 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1313 __FILE__
, __LINE__
, __func__
);
1315 item
= UNHIT_CACHE_ITEM
;
1316 for (i
= 0; i
< CACHE_ITEM_NUM
; i
++) {
1317 addr
= Cache
.array
[i
].address
;
1318 if ((page_addr
>= addr
) &&
1319 (page_addr
< (addr
+ Cache
.cache_item_size
))) {
1328 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1329 * Function: FTL_Calculate_LRU
1332 * Description: Calculate the least recently block in a cache and record its
1333 * index in LRU field.
1334 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1335 static void FTL_Calculate_LRU(void)
1337 u16 i
, bCurrentLRU
, bTempCount
;
1339 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1340 __FILE__
, __LINE__
, __func__
);
1343 bTempCount
= MAX_WORD_VALUE
;
1345 for (i
= 0; i
< CACHE_ITEM_NUM
; i
++) {
1346 if (Cache
.array
[i
].use_cnt
< bTempCount
) {
1348 bTempCount
= Cache
.array
[i
].use_cnt
;
1352 Cache
.LRU
= bCurrentLRU
;
1355 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1356 * Function: FTL_Cache_Read_Page
1357 * Inputs: pointer to read buffer, logical address and cache item number
1359 * Description: Read the page from the cached block addressed by blocknumber
1360 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1361 static void FTL_Cache_Read_Page(u8
*data_buf
, u64 logic_addr
, u16 cache_item
)
1365 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1366 __FILE__
, __LINE__
, __func__
);
1368 start_addr
= Cache
.array
[cache_item
].buf
;
1369 start_addr
+= (u32
)(((logic_addr
- Cache
.array
[cache_item
].address
) >>
1370 DeviceInfo
.nBitsInPageDataSize
) * DeviceInfo
.wPageDataSize
);
1373 GLOB_LLD_MemCopy_CMD(data_buf
, start_addr
,
1374 DeviceInfo
.wPageDataSize
, 0);
1377 memcpy(data_buf
, start_addr
, DeviceInfo
.wPageDataSize
);
1380 if (Cache
.array
[cache_item
].use_cnt
< MAX_WORD_VALUE
)
1381 Cache
.array
[cache_item
].use_cnt
++;
1384 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1385 * Function: FTL_Cache_Read_All
1386 * Inputs: pointer to read buffer,block address
1387 * Outputs: PASS=0 / FAIL =1
1388 * Description: It reads pages in cache
1389 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1390 static int FTL_Cache_Read_All(u8
*pData
, u64 phy_addr
)
1397 u32
*pbt
= (u32
*)g_pBlockTable
;
1400 Block
= BLK_FROM_ADDR(phy_addr
);
1401 Page
= PAGE_FROM_ADDR(phy_addr
, Block
);
1402 PageCount
= Cache
.pages_per_item
;
1404 nand_dbg_print(NAND_DBG_DEBUG
,
1405 "%s, Line %d, Function: %s, Block: 0x%x\n",
1406 __FILE__
, __LINE__
, __func__
, Block
);
1409 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
1410 if ((pbt
[i
] & (~BAD_BLOCK
)) == Block
) {
1412 if (IS_SPARE_BLOCK(i
) || IS_BAD_BLOCK(i
) ||
1413 IS_DISCARDED_BLOCK(i
)) {
1414 /* Add by yunpeng -2008.12.3 */
1416 GLOB_LLD_MemCopy_CMD(pData
, g_temp_buf
,
1417 PageCount
* DeviceInfo
.wPageDataSize
, 0);
1421 PageCount
* DeviceInfo
.wPageDataSize
);
1425 continue; /* break ?? */
1430 if (0xffffffff == lba
)
1431 printk(KERN_ERR
"FTL_Cache_Read_All: Block is not found in BT\n");
1434 wResult
= GLOB_LLD_Read_Page_Main_cdma(pData
, Block
, Page
,
1435 PageCount
, LLD_CMD_FLAG_MODE_CDMA
);
1436 if (DeviceInfo
.MLCDevice
) {
1437 g_pReadCounter
[Block
- DeviceInfo
.wSpectraStartBlock
]++;
1438 nand_dbg_print(NAND_DBG_DEBUG
,
1439 "Read Counter modified in ftl_cmd_cnt %u"
1440 " Block %u Counter%u\n",
1441 ftl_cmd_cnt
, (unsigned int)Block
,
1442 g_pReadCounter
[Block
-
1443 DeviceInfo
.wSpectraStartBlock
]);
1445 p_BTableChangesDelta
=
1446 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
1447 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
1448 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
1449 p_BTableChangesDelta
->RC_Index
=
1450 Block
- DeviceInfo
.wSpectraStartBlock
;
1451 p_BTableChangesDelta
->RC_Entry_Value
=
1452 g_pReadCounter
[Block
- DeviceInfo
.wSpectraStartBlock
];
1453 p_BTableChangesDelta
->ValidFields
= 0xC0;
1457 if (g_pReadCounter
[Block
- DeviceInfo
.wSpectraStartBlock
] >=
1459 FTL_Read_Disturbance(Block
);
1460 if (g_cBlockTableStatus
!= IN_PROGRESS_BLOCK_TABLE
) {
1461 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
1462 FTL_Write_IN_Progress_Block_Table_Page();
1468 wResult
= GLOB_LLD_Read_Page_Main(pData
, Block
, Page
, PageCount
);
1469 if (wResult
== FAIL
)
1472 if (DeviceInfo
.MLCDevice
) {
1473 g_pReadCounter
[Block
- DeviceInfo
.wSpectraStartBlock
]++;
1474 if (g_pReadCounter
[Block
- DeviceInfo
.wSpectraStartBlock
] >=
1476 FTL_Read_Disturbance(Block
);
1477 if (g_cBlockTableStatus
!= IN_PROGRESS_BLOCK_TABLE
) {
1478 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
1479 FTL_Write_IN_Progress_Block_Table_Page();
1486 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1487 * Function: FTL_Cache_Write_All
1488 * Inputs: pointer to cache in sys memory
1489 * address of free block in flash
1490 * Outputs: PASS=0 / FAIL=1
1491 * Description: writes all the pages of the block in cache to flash
1493 * NOTE:need to make sure this works ok when cache is limited
1494 * to a partial block. This is where copy-back would be
1495 * activated. This would require knowing which pages in the
1496 * cached block are clean/dirty.Right now we only know if
1497 * the whole block is clean/dirty.
1498 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1499 static int FTL_Cache_Write_All(u8
*pData
, u64 blk_addr
)
1506 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1507 __FILE__
, __LINE__
, __func__
);
1509 nand_dbg_print(NAND_DBG_DEBUG
, "This block %d going to be written "
1510 "on %d\n", cache_block_to_write
,
1511 (u32
)(blk_addr
>> DeviceInfo
.nBitsInBlockDataSize
));
1513 Block
= BLK_FROM_ADDR(blk_addr
);
1514 Page
= PAGE_FROM_ADDR(blk_addr
, Block
);
1515 PageCount
= Cache
.pages_per_item
;
1518 if (FAIL
== GLOB_LLD_Write_Page_Main_cdma(pData
,
1519 Block
, Page
, PageCount
)) {
1520 nand_dbg_print(NAND_DBG_WARN
,
1521 "NAND Program fail in %s, Line %d, "
1522 "Function: %s, new Bad Block %d generated! "
1523 "Need Bad Block replacing.\n",
1524 __FILE__
, __LINE__
, __func__
, Block
);
1529 if (FAIL
== GLOB_LLD_Write_Page_Main(pData
, Block
, Page
, PageCount
)) {
1530 nand_dbg_print(NAND_DBG_WARN
, "NAND Program fail in %s,"
1531 " Line %d, Function %s, new Bad Block %d generated!"
1532 "Need Bad Block replacing.\n",
1533 __FILE__
, __LINE__
, __func__
, Block
);
1540 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1541 * Function: FTL_Cache_Update_Block
1542 * Inputs: pointer to buffer,page address,block address
1543 * Outputs: PASS=0 / FAIL=1
1544 * Description: It updates the cache
1545 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1546 static int FTL_Cache_Update_Block(u8
*pData
,
1547 u64 old_page_addr
, u64 blk_addr
)
1558 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1559 __FILE__
, __LINE__
, __func__
);
1561 old_blk_addr
= (u64
)(old_page_addr
>>
1562 DeviceInfo
.nBitsInBlockDataSize
) * DeviceInfo
.wBlockDataSize
;
1563 page_offset
= (u16
)(GLOB_u64_Remainder(old_page_addr
, 2) >>
1564 DeviceInfo
.nBitsInPageDataSize
);
1566 for (i
= 0; i
< DeviceInfo
.wPagesPerBlock
; i
+= Cache
.pages_per_item
) {
1567 page_addr
= old_blk_addr
+ i
* DeviceInfo
.wPageDataSize
;
1568 if (i
!= page_offset
) {
1569 wFoundInCache
= FAIL
;
1570 for (j
= 0; j
< CACHE_ITEM_NUM
; j
++) {
1571 addr
= Cache
.array
[j
].address
;
1572 addr
= FTL_Get_Physical_Block_Addr(addr
) +
1573 GLOB_u64_Remainder(addr
, 2);
1574 if ((addr
>= page_addr
) && addr
<
1575 (page_addr
+ Cache
.cache_item_size
)) {
1576 wFoundInCache
= PASS
;
1577 buf
= Cache
.array
[j
].buf
;
1578 Cache
.array
[j
].changed
= SET
;
1580 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1581 int_cache
[ftl_cmd_cnt
].item
= j
;
1582 int_cache
[ftl_cmd_cnt
].cache
.address
=
1583 Cache
.array
[j
].address
;
1584 int_cache
[ftl_cmd_cnt
].cache
.changed
=
1585 Cache
.array
[j
].changed
;
1591 if (FAIL
== wFoundInCache
) {
1592 if (ERR
== FTL_Cache_Read_All(g_pTempBuf
,
1603 if (FAIL
== FTL_Cache_Write_All(buf
,
1604 blk_addr
+ (page_addr
- old_blk_addr
))) {
1613 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1614 * Function: FTL_Copy_Block
1615 * Inputs: source block address
1616 * Destination block address
1617 * Outputs: PASS=0 / FAIL=1
1618 * Description: used only for static wear leveling to move the block
1619 * containing static data to new blocks(more worn)
1620 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1621 int FTL_Copy_Block(u64 old_blk_addr
, u64 blk_addr
)
1623 int i
, r1
, r2
, wResult
= PASS
;
1625 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1626 __FILE__
, __LINE__
, __func__
);
1628 for (i
= 0; i
< DeviceInfo
.wPagesPerBlock
; i
+= Cache
.pages_per_item
) {
1629 r1
= FTL_Cache_Read_All(g_pTempBuf
, old_blk_addr
+
1630 i
* DeviceInfo
.wPageDataSize
);
1631 r2
= FTL_Cache_Write_All(g_pTempBuf
, blk_addr
+
1632 i
* DeviceInfo
.wPageDataSize
);
1633 if ((ERR
== r1
) || (FAIL
== r2
)) {
1642 /* Search the block table to find out the least wear block and then return it */
1643 static u32
find_least_worn_blk_for_l2_cache(void)
1646 u32
*pbt
= (u32
*)g_pBlockTable
;
1647 u8 least_wear_cnt
= MAX_BYTE_VALUE
;
1648 u32 least_wear_blk_idx
= MAX_U32_VALUE
;
1651 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
1652 if (IS_SPARE_BLOCK(i
)) {
1653 phy_idx
= (u32
)((~BAD_BLOCK
) & pbt
[i
]);
1654 if (phy_idx
> DeviceInfo
.wSpectraEndBlock
)
1655 printk(KERN_ERR
"find_least_worn_blk_for_l2_cache: "
1656 "Too big phy block num (%d)\n", phy_idx
);
1657 if (g_pWearCounter
[phy_idx
-DeviceInfo
.wSpectraStartBlock
] < least_wear_cnt
) {
1658 least_wear_cnt
= g_pWearCounter
[phy_idx
- DeviceInfo
.wSpectraStartBlock
];
1659 least_wear_blk_idx
= i
;
1664 nand_dbg_print(NAND_DBG_WARN
,
1665 "find_least_worn_blk_for_l2_cache: "
1666 "find block %d with least worn counter (%d)\n",
1667 least_wear_blk_idx
, least_wear_cnt
);
1669 return least_wear_blk_idx
;
1674 /* Get blocks for Level2 Cache */
1675 static int get_l2_cache_blks(void)
1679 u32
*pbt
= (u32
*)g_pBlockTable
;
1681 for (n
= 0; n
< BLK_NUM_FOR_L2_CACHE
; n
++) {
1682 blk
= find_least_worn_blk_for_l2_cache();
1683 if (blk
> DeviceInfo
.wDataBlockNum
) {
1684 nand_dbg_print(NAND_DBG_WARN
,
1685 "find_least_worn_blk_for_l2_cache: "
1686 "No enough free NAND blocks (n: %d) for L2 Cache!\n", n
);
1689 /* Tag the free block as discard in block table */
1690 pbt
[blk
] = (pbt
[blk
] & (~BAD_BLOCK
)) | DISCARD_BLOCK
;
1691 /* Add the free block to the L2 Cache block array */
1692 cache_l2
.blk_array
[n
] = pbt
[blk
] & (~BAD_BLOCK
);
1698 static int erase_l2_cache_blocks(void)
1703 u32
*pbt
= (u32
*)g_pBlockTable
;
1705 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
1706 __FILE__
, __LINE__
, __func__
);
1708 for (i
= 0; i
< BLK_NUM_FOR_L2_CACHE
; i
++) {
1709 pblk
= cache_l2
.blk_array
[i
];
1711 /* If the L2 cache block is invalid, then just skip it */
1712 if (MAX_U32_VALUE
== pblk
)
1715 BUG_ON(pblk
> DeviceInfo
.wSpectraEndBlock
);
1717 addr
= (u64
)pblk
<< DeviceInfo
.nBitsInBlockDataSize
;
1718 if (PASS
== GLOB_FTL_Block_Erase(addr
)) {
1719 /* Get logical block number of the erased block */
1720 lblk
= FTL_Get_Block_Index(pblk
);
1721 BUG_ON(BAD_BLOCK
== lblk
);
1722 /* Tag it as free in the block table */
1723 pbt
[lblk
] &= (u32
)(~DISCARD_BLOCK
);
1724 pbt
[lblk
] |= (u32
)(SPARE_BLOCK
);
1726 MARK_BLOCK_AS_BAD(pbt
[lblk
]);
1735 * Merge the valid data page in the L2 cache blocks into NAND.
1737 static int flush_l2_cache(void)
1739 struct list_head
*p
;
1740 struct spectra_l2_cache_list
*pnd
, *tmp_pnd
;
1741 u32
*pbt
= (u32
*)g_pBlockTable
;
1742 u32 phy_blk
, l2_blk
;
1747 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
1748 __FILE__
, __LINE__
, __func__
);
1750 if (list_empty(&cache_l2
.table
.list
)) /* No data to flush */
1753 //dump_cache_l2_table();
1755 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
) {
1756 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
1757 FTL_Write_IN_Progress_Block_Table_Page();
1760 list_for_each(p
, &cache_l2
.table
.list
) {
1761 pnd
= list_entry(p
, struct spectra_l2_cache_list
, list
);
1762 if (IS_SPARE_BLOCK(pnd
->logical_blk_num
) ||
1763 IS_BAD_BLOCK(pnd
->logical_blk_num
) ||
1764 IS_DISCARDED_BLOCK(pnd
->logical_blk_num
)) {
1765 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d\n", __FILE__
, __LINE__
);
1766 memset(cache_l2_blk_buf
, 0xff, DeviceInfo
.wPagesPerBlock
* DeviceInfo
.wPageDataSize
);
1768 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d\n", __FILE__
, __LINE__
);
1769 phy_blk
= pbt
[pnd
->logical_blk_num
] & (~BAD_BLOCK
);
1770 ret
= GLOB_LLD_Read_Page_Main(cache_l2_blk_buf
,
1771 phy_blk
, 0, DeviceInfo
.wPagesPerBlock
);
1773 printk(KERN_ERR
"Read NAND page fail in %s, Line %d\n", __FILE__
, __LINE__
);
1777 for (i
= 0; i
< DeviceInfo
.wPagesPerBlock
; i
++) {
1778 if (pnd
->pages_array
[i
] != MAX_U32_VALUE
) {
1779 l2_blk
= cache_l2
.blk_array
[(pnd
->pages_array
[i
] >> 16) & 0xffff];
1780 l2_page
= pnd
->pages_array
[i
] & 0xffff;
1781 ret
= GLOB_LLD_Read_Page_Main(cache_l2_page_buf
, l2_blk
, l2_page
, 1);
1783 printk(KERN_ERR
"Read NAND page fail in %s, Line %d\n", __FILE__
, __LINE__
);
1785 memcpy(cache_l2_blk_buf
+ i
* DeviceInfo
.wPageDataSize
, cache_l2_page_buf
, DeviceInfo
.wPageDataSize
);
1789 /* Find a free block and tag the original block as discarded */
1790 addr
= (u64
)pnd
->logical_blk_num
<< DeviceInfo
.nBitsInBlockDataSize
;
1791 ret
= FTL_Replace_Block(addr
);
1793 printk(KERN_ERR
"FTL_Replace_Block fail in %s, Line %d\n", __FILE__
, __LINE__
);
1796 /* Write back the updated data into NAND */
1797 phy_blk
= pbt
[pnd
->logical_blk_num
] & (~BAD_BLOCK
);
1798 if (FAIL
== GLOB_LLD_Write_Page_Main(cache_l2_blk_buf
, phy_blk
, 0, DeviceInfo
.wPagesPerBlock
)) {
1799 nand_dbg_print(NAND_DBG_WARN
,
1800 "Program NAND block %d fail in %s, Line %d\n",
1801 phy_blk
, __FILE__
, __LINE__
);
1802 /* This may not be really a bad block. So just tag it as discarded. */
1803 /* Then it has a chance to be erased when garbage collection. */
1804 /* If it is really bad, then the erase will fail and it will be marked */
1805 /* as bad then. Otherwise it will be marked as free and can be used again */
1806 MARK_BLK_AS_DISCARD(pbt
[pnd
->logical_blk_num
]);
1807 /* Find another free block and write it again */
1808 FTL_Replace_Block(addr
);
1809 phy_blk
= pbt
[pnd
->logical_blk_num
] & (~BAD_BLOCK
);
1810 if (FAIL
== GLOB_LLD_Write_Page_Main(cache_l2_blk_buf
, phy_blk
, 0, DeviceInfo
.wPagesPerBlock
)) {
1811 printk(KERN_ERR
"Failed to write back block %d when flush L2 cache."
1812 "Some data will be lost!\n", phy_blk
);
1813 MARK_BLOCK_AS_BAD(pbt
[pnd
->logical_blk_num
]);
1816 /* tag the new free block as used block */
1817 pbt
[pnd
->logical_blk_num
] &= (~SPARE_BLOCK
);
1821 /* Destroy the L2 Cache table and free the memory of all nodes */
1822 list_for_each_entry_safe(pnd
, tmp_pnd
, &cache_l2
.table
.list
, list
) {
1823 list_del(&pnd
->list
);
1827 /* Erase discard L2 cache blocks */
1828 if (erase_l2_cache_blocks() != PASS
)
1829 nand_dbg_print(NAND_DBG_WARN
,
1830 " Erase L2 cache blocks error in %s, Line %d\n",
1831 __FILE__
, __LINE__
);
1833 /* Init the Level2 Cache data structure */
1834 for (i
= 0; i
< BLK_NUM_FOR_L2_CACHE
; i
++)
1835 cache_l2
.blk_array
[i
] = MAX_U32_VALUE
;
1836 cache_l2
.cur_blk_idx
= 0;
1837 cache_l2
.cur_page_num
= 0;
1838 INIT_LIST_HEAD(&cache_l2
.table
.list
);
1839 cache_l2
.table
.logical_blk_num
= MAX_U32_VALUE
;
1845 * Write back a changed victim cache item to the Level2 Cache
1846 * and update the L2 Cache table to map the change.
1847 * If the L2 Cache is full, then start to do the L2 Cache flush.
1849 static int write_back_to_l2_cache(u8
*buf
, u64 logical_addr
)
1851 u32 logical_blk_num
;
1852 u16 logical_page_num
;
1853 struct list_head
*p
;
1854 struct spectra_l2_cache_list
*pnd
, *pnd_new
;
1858 nand_dbg_print(NAND_DBG_DEBUG
, "%s, Line %d, Function: %s\n",
1859 __FILE__
, __LINE__
, __func__
);
1862 * If Level2 Cache table is empty, then it means either:
1863 * 1. This is the first time that the function called after FTL_init
1865 * 2. The Level2 Cache has just been flushed
1867 * So, 'steal' some free blocks from NAND for L2 Cache using
1868 * by just mask them as discard in the block table
1870 if (list_empty(&cache_l2
.table
.list
)) {
1871 BUG_ON(cache_l2
.cur_blk_idx
!= 0);
1872 BUG_ON(cache_l2
.cur_page_num
!= 0);
1873 BUG_ON(cache_l2
.table
.logical_blk_num
!= MAX_U32_VALUE
);
1874 if (FAIL
== get_l2_cache_blks()) {
1875 GLOB_FTL_Garbage_Collection();
1876 if (FAIL
== get_l2_cache_blks()) {
1877 printk(KERN_ALERT
"Fail to get L2 cache blks!\n");
1883 logical_blk_num
= BLK_FROM_ADDR(logical_addr
);
1884 logical_page_num
= PAGE_FROM_ADDR(logical_addr
, logical_blk_num
);
1885 BUG_ON(logical_blk_num
== MAX_U32_VALUE
);
1887 /* Write the cache item data into the current position of L2 Cache */
1893 if (FAIL
== GLOB_LLD_Write_Page_Main(buf
,
1894 cache_l2
.blk_array
[cache_l2
.cur_blk_idx
],
1895 cache_l2
.cur_page_num
, 1)) {
1896 nand_dbg_print(NAND_DBG_WARN
, "NAND Program fail in "
1897 "%s, Line %d, new Bad Block %d generated!\n",
1899 cache_l2
.blk_array
[cache_l2
.cur_blk_idx
]);
1901 /* TODO: tag the current block as bad and try again */
1908 * Update the L2 Cache table.
1910 * First seaching in the table to see whether the logical block
1911 * has been mapped. If not, then kmalloc a new node for the
1912 * logical block, fill data, and then insert it to the list.
1913 * Otherwise, just update the mapped node directly.
1916 list_for_each(p
, &cache_l2
.table
.list
) {
1917 pnd
= list_entry(p
, struct spectra_l2_cache_list
, list
);
1918 if (pnd
->logical_blk_num
== logical_blk_num
) {
1919 pnd
->pages_array
[logical_page_num
] =
1920 (cache_l2
.cur_blk_idx
<< 16) |
1921 cache_l2
.cur_page_num
;
1926 if (!found
) { /* Create new node for the logical block here */
1928 /* The logical pages to physical pages map array is
1929 * located at the end of struct spectra_l2_cache_list.
1931 node_size
= sizeof(struct spectra_l2_cache_list
) +
1932 sizeof(u32
) * DeviceInfo
.wPagesPerBlock
;
1933 pnd_new
= kmalloc(node_size
, GFP_ATOMIC
);
1935 printk(KERN_ERR
"Failed to kmalloc in %s Line %d\n",
1936 __FILE__
, __LINE__
);
1938 * TODO: Need to flush all the L2 cache into NAND ASAP
1939 * since no memory available here
1942 pnd_new
->logical_blk_num
= logical_blk_num
;
1943 for (i
= 0; i
< DeviceInfo
.wPagesPerBlock
; i
++)
1944 pnd_new
->pages_array
[i
] = MAX_U32_VALUE
;
1945 pnd_new
->pages_array
[logical_page_num
] =
1946 (cache_l2
.cur_blk_idx
<< 16) | cache_l2
.cur_page_num
;
1947 list_add(&pnd_new
->list
, &cache_l2
.table
.list
);
1950 /* Increasing the current position pointer of the L2 Cache */
1951 cache_l2
.cur_page_num
++;
1952 if (cache_l2
.cur_page_num
>= DeviceInfo
.wPagesPerBlock
) {
1953 cache_l2
.cur_blk_idx
++;
1954 if (cache_l2
.cur_blk_idx
>= BLK_NUM_FOR_L2_CACHE
) {
1955 /* The L2 Cache is full. Need to flush it now */
1956 nand_dbg_print(NAND_DBG_WARN
,
1957 "L2 Cache is full, will start to flush it\n");
1960 cache_l2
.cur_page_num
= 0;
1968 * Seach in the Level2 Cache table to find the cache item.
1969 * If find, read the data from the NAND page of L2 Cache,
1970 * Otherwise, return FAIL.
1972 static int search_l2_cache(u8
*buf
, u64 logical_addr
)
1974 u32 logical_blk_num
;
1975 u16 logical_page_num
;
1976 struct list_head
*p
;
1977 struct spectra_l2_cache_list
*pnd
;
1978 u32 tmp
= MAX_U32_VALUE
;
1983 logical_blk_num
= BLK_FROM_ADDR(logical_addr
);
1984 logical_page_num
= PAGE_FROM_ADDR(logical_addr
, logical_blk_num
);
1986 list_for_each(p
, &cache_l2
.table
.list
) {
1987 pnd
= list_entry(p
, struct spectra_l2_cache_list
, list
);
1988 if (pnd
->logical_blk_num
== logical_blk_num
) {
1989 tmp
= pnd
->pages_array
[logical_page_num
];
1994 if (tmp
!= MAX_U32_VALUE
) { /* Found valid map */
1995 phy_blk
= cache_l2
.blk_array
[(tmp
>> 16) & 0xFFFF];
1996 phy_page
= tmp
& 0xFFFF;
2000 ret
= GLOB_LLD_Read_Page_Main(buf
, phy_blk
, phy_page
, 1);
2007 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2008 * Function: FTL_Cache_Write_Back
2009 * Inputs: pointer to data cached in sys memory
2010 * address of free block in flash
2011 * Outputs: PASS=0 / FAIL=1
2012 * Description: writes all the pages of Cache Block to flash
2014 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2015 static int FTL_Cache_Write_Back(u8
*pData
, u64 blk_addr
)
2018 u64 old_page_addr
, addr
, phy_addr
;
2019 u32
*pbt
= (u32
*)g_pBlockTable
;
2022 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
2023 __FILE__
, __LINE__
, __func__
);
2025 old_page_addr
= FTL_Get_Physical_Block_Addr(blk_addr
) +
2026 GLOB_u64_Remainder(blk_addr
, 2);
2028 iErase
= (FAIL
== FTL_Replace_Block(blk_addr
)) ? PASS
: FAIL
;
2030 pbt
[BLK_FROM_ADDR(blk_addr
)] &= (~SPARE_BLOCK
);
2033 p_BTableChangesDelta
= (struct BTableChangesDelta
*)g_pBTDelta_Free
;
2034 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
2036 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
2037 p_BTableChangesDelta
->BT_Index
= (u32
)(blk_addr
>>
2038 DeviceInfo
.nBitsInBlockDataSize
);
2039 p_BTableChangesDelta
->BT_Entry_Value
=
2040 pbt
[(u32
)(blk_addr
>> DeviceInfo
.nBitsInBlockDataSize
)];
2041 p_BTableChangesDelta
->ValidFields
= 0x0C;
2044 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
) {
2045 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
2046 FTL_Write_IN_Progress_Block_Table_Page();
2049 for (i
= 0; i
< RETRY_TIMES
; i
++) {
2050 if (PASS
== iErase
) {
2051 phy_addr
= FTL_Get_Physical_Block_Addr(blk_addr
);
2052 if (FAIL
== GLOB_FTL_Block_Erase(phy_addr
)) {
2053 lba
= BLK_FROM_ADDR(blk_addr
);
2054 MARK_BLOCK_AS_BAD(pbt
[lba
]);
2060 for (j
= 0; j
< CACHE_ITEM_NUM
; j
++) {
2061 addr
= Cache
.array
[j
].address
;
2062 if ((addr
<= blk_addr
) &&
2063 ((addr
+ Cache
.cache_item_size
) > blk_addr
))
2064 cache_block_to_write
= j
;
2067 phy_addr
= FTL_Get_Physical_Block_Addr(blk_addr
);
2068 if (PASS
== FTL_Cache_Update_Block(pData
,
2069 old_page_addr
, phy_addr
)) {
2070 cache_block_to_write
= UNHIT_CACHE_ITEM
;
2077 if (i
>= RETRY_TIMES
) {
2078 if (ERR
== FTL_Flash_Error_Handle(pData
,
2079 old_page_addr
, blk_addr
))
2088 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2089 * Function: FTL_Cache_Write_Page
2090 * Inputs: Pointer to buffer, page address, cache block number
2091 * Outputs: PASS=0 / FAIL=1
2092 * Description: It writes the data in Cache Block
2093 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2094 static void FTL_Cache_Write_Page(u8
*pData
, u64 page_addr
,
2095 u8 cache_blk
, u16 flag
)
2100 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
2101 __FILE__
, __LINE__
, __func__
);
2103 addr
= Cache
.array
[cache_blk
].address
;
2104 pDest
= Cache
.array
[cache_blk
].buf
;
2106 pDest
+= (unsigned long)(page_addr
- addr
);
2107 Cache
.array
[cache_blk
].changed
= SET
;
2109 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2110 int_cache
[ftl_cmd_cnt
].item
= cache_blk
;
2111 int_cache
[ftl_cmd_cnt
].cache
.address
=
2112 Cache
.array
[cache_blk
].address
;
2113 int_cache
[ftl_cmd_cnt
].cache
.changed
=
2114 Cache
.array
[cache_blk
].changed
;
2116 GLOB_LLD_MemCopy_CMD(pDest
, pData
, DeviceInfo
.wPageDataSize
, flag
);
2119 memcpy(pDest
, pData
, DeviceInfo
.wPageDataSize
);
2121 if (Cache
.array
[cache_blk
].use_cnt
< MAX_WORD_VALUE
)
2122 Cache
.array
[cache_blk
].use_cnt
++;
2125 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2126 * Function: FTL_Cache_Write
2128 * Outputs: PASS=0 / FAIL=1
2129 * Description: It writes least frequently used Cache block to flash if it
2131 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2132 static int FTL_Cache_Write(void)
2134 int i
, bResult
= PASS
;
2135 u16 bNO
, least_count
= 0xFFFF;
2137 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
2138 __FILE__
, __LINE__
, __func__
);
2140 FTL_Calculate_LRU();
2143 nand_dbg_print(NAND_DBG_DEBUG
, "FTL_Cache_Write: "
2144 "Least used cache block is %d\n", bNO
);
2146 if (Cache
.array
[bNO
].changed
!= SET
)
2149 nand_dbg_print(NAND_DBG_DEBUG
, "FTL_Cache_Write: Cache"
2150 " Block %d containing logical block %d is dirty\n",
2152 (u32
)(Cache
.array
[bNO
].address
>>
2153 DeviceInfo
.nBitsInBlockDataSize
));
2155 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2156 int_cache
[ftl_cmd_cnt
].item
= bNO
;
2157 int_cache
[ftl_cmd_cnt
].cache
.address
=
2158 Cache
.array
[bNO
].address
;
2159 int_cache
[ftl_cmd_cnt
].cache
.changed
= CLEAR
;
2162 bResult
= write_back_to_l2_cache(Cache
.array
[bNO
].buf
,
2163 Cache
.array
[bNO
].address
);
2165 Cache
.array
[bNO
].changed
= CLEAR
;
2167 least_count
= Cache
.array
[bNO
].use_cnt
;
2169 for (i
= 0; i
< CACHE_ITEM_NUM
; i
++) {
2172 if (Cache
.array
[i
].use_cnt
> 0)
2173 Cache
.array
[i
].use_cnt
-= least_count
;
2179 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2180 * Function: FTL_Cache_Read
2181 * Inputs: Page address
2182 * Outputs: PASS=0 / FAIL=1
2183 * Description: It reads the block from device in Cache Block
2184 * Set the LRU count to 1
2185 * Mark the Cache Block as clean
2186 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2187 static int FTL_Cache_Read(u64 logical_addr
)
2189 u64 item_addr
, phy_addr
;
2193 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
2194 __FILE__
, __LINE__
, __func__
);
2196 num
= Cache
.LRU
; /* The LRU cache item will be overwritten */
2198 item_addr
= (u64
)GLOB_u64_Div(logical_addr
, Cache
.cache_item_size
) *
2199 Cache
.cache_item_size
;
2200 Cache
.array
[num
].address
= item_addr
;
2201 Cache
.array
[num
].use_cnt
= 1;
2202 Cache
.array
[num
].changed
= CLEAR
;
2205 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2206 int_cache
[ftl_cmd_cnt
].item
= num
;
2207 int_cache
[ftl_cmd_cnt
].cache
.address
=
2208 Cache
.array
[num
].address
;
2209 int_cache
[ftl_cmd_cnt
].cache
.changed
=
2210 Cache
.array
[num
].changed
;
2214 * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
2215 * Otherwise, read it from NAND
2217 ret
= search_l2_cache(Cache
.array
[num
].buf
, logical_addr
);
2218 if (PASS
== ret
) /* Hit in L2 Cache */
2221 /* Compute the physical start address of NAND device according to */
2222 /* the logical start address of the cache item (LRU cache item) */
2223 phy_addr
= FTL_Get_Physical_Block_Addr(item_addr
) +
2224 GLOB_u64_Remainder(item_addr
, 2);
2226 return FTL_Cache_Read_All(Cache
.array
[num
].buf
, phy_addr
);
2229 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2230 * Function: FTL_Check_Block_Table
2232 * Outputs: PASS=0 / FAIL=1
2233 * Description: It checks the correctness of each block table entry
2234 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2235 static int FTL_Check_Block_Table(int wOldTable
)
2240 u32
*pbt
= (u32
*)g_pBlockTable
;
2241 u8
*pFlag
= flag_check_blk_table
;
2243 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
2244 __FILE__
, __LINE__
, __func__
);
2246 if (NULL
!= pFlag
) {
2247 memset(pFlag
, FAIL
, DeviceInfo
.wDataBlockNum
);
2248 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
2249 blk_idx
= (u32
)(pbt
[i
] & (~BAD_BLOCK
));
2252 * 20081006/KBV - Changed to pFlag[i] reference
2253 * to avoid buffer overflow
2257 * 2008-10-20 Yunpeng Note: This change avoid
2258 * buffer overflow, but changed function of
2259 * the code, so it should be re-write later
2261 if ((blk_idx
> DeviceInfo
.wSpectraEndBlock
) ||
2275 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2276 * Function: FTL_Write_Block_Table
2278 * Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
2280 * Description: It writes the block table
2281 * Block table always mapped to LBA 0 which inturn mapped
2282 * to any physical block
2283 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2284 static int FTL_Write_Block_Table(int wForce
)
2286 u32
*pbt
= (u32
*)g_pBlockTable
;
2287 int wSuccess
= PASS
;
2288 u32 wTempBlockTableIndex
;
2289 u16 bt_pages
, new_bt_offset
;
2290 u8 blockchangeoccured
= 0;
2292 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
2293 __FILE__
, __LINE__
, __func__
);
2295 bt_pages
= FTL_Get_Block_Table_Flash_Size_Pages();
2297 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
)
2300 if (PASS
== wForce
) {
2301 g_wBlockTableOffset
=
2302 (u16
)(DeviceInfo
.wPagesPerBlock
- bt_pages
);
2304 p_BTableChangesDelta
=
2305 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
2306 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
2308 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
2309 p_BTableChangesDelta
->g_wBlockTableOffset
=
2310 g_wBlockTableOffset
;
2311 p_BTableChangesDelta
->ValidFields
= 0x01;
2315 nand_dbg_print(NAND_DBG_DEBUG
,
2316 "Inside FTL_Write_Block_Table: block %d Page:%d\n",
2317 g_wBlockTableIndex
, g_wBlockTableOffset
);
2320 new_bt_offset
= g_wBlockTableOffset
+ bt_pages
+ 1;
2321 if ((0 == (new_bt_offset
% DeviceInfo
.wPagesPerBlock
)) ||
2322 (new_bt_offset
> DeviceInfo
.wPagesPerBlock
) ||
2323 (FAIL
== wSuccess
)) {
2324 wTempBlockTableIndex
= FTL_Replace_Block_Table();
2325 if (BAD_BLOCK
== wTempBlockTableIndex
)
2327 if (!blockchangeoccured
) {
2328 bt_block_changed
= 1;
2329 blockchangeoccured
= 1;
2332 g_wBlockTableIndex
= wTempBlockTableIndex
;
2333 g_wBlockTableOffset
= 0;
2334 pbt
[BLOCK_TABLE_INDEX
] = g_wBlockTableIndex
;
2336 p_BTableChangesDelta
=
2337 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
2338 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
2340 p_BTableChangesDelta
->ftl_cmd_cnt
=
2342 p_BTableChangesDelta
->g_wBlockTableOffset
=
2343 g_wBlockTableOffset
;
2344 p_BTableChangesDelta
->g_wBlockTableIndex
=
2346 p_BTableChangesDelta
->ValidFields
= 0x03;
2348 p_BTableChangesDelta
=
2349 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
2351 sizeof(struct BTableChangesDelta
);
2353 p_BTableChangesDelta
->ftl_cmd_cnt
=
2355 p_BTableChangesDelta
->BT_Index
=
2357 p_BTableChangesDelta
->BT_Entry_Value
=
2358 pbt
[BLOCK_TABLE_INDEX
];
2359 p_BTableChangesDelta
->ValidFields
= 0x0C;
2363 wSuccess
= FTL_Write_Block_Table_Data();
2364 if (FAIL
== wSuccess
)
2365 MARK_BLOCK_AS_BAD(pbt
[BLOCK_TABLE_INDEX
]);
2366 } while (FAIL
== wSuccess
);
2368 g_cBlockTableStatus
= CURRENT_BLOCK_TABLE
;
2373 /******************************************************************
2374 * Function: GLOB_FTL_Flash_Format
2377 * Description: The block table stores bad block info, including MDF+
2378 * blocks gone bad over the ages. Therefore, if we have a
2379 * block table in place, then use it to scan for bad blocks
2380 * If not, then scan for MDF.
2381 * Now, a block table will only be found if spectra was already
2382 * being used. For a fresh flash, we'll go thru scanning for
2383 * MDF. If spectra was being used, then there is a chance that
2384 * the MDF has been corrupted. Spectra avoids writing to the
2385 * first 2 bytes of the spare area to all pages in a block. This
2386 * covers all known flash devices. However, since flash
2387 * manufacturers have no standard of where the MDF is stored,
2388 * this cannot guarantee that the MDF is protected for future
2389 * devices too. The initial scanning for the block table assures
2390 * this. It is ok even if the block table is outdated, as all
2391 * we're looking for are bad block markers.
2392 * Use this when mounting a file system or starting a
2395 *********************************************************************/
2396 static int FTL_Format_Flash(u8 valid_block_table
)
2399 u32
*pbt
= (u32
*)g_pBlockTable
;
2404 u32
*pbtStartingCopy
= (u32
*)g_pBTStartingCopy
;
2409 if (FAIL
== FTL_Check_Block_Table(FAIL
))
2410 valid_block_table
= 0;
2412 if (valid_block_table
) {
2416 k
= DeviceInfo
.wSpectraStartBlock
;
2417 while (switched
&& (k
< DeviceInfo
.wSpectraEndBlock
)) {
2420 for (j
= DeviceInfo
.wSpectraStartBlock
, i
= 0;
2421 j
<= DeviceInfo
.wSpectraEndBlock
;
2423 block
= (pbt
[i
] & ~BAD_BLOCK
) -
2424 DeviceInfo
.wSpectraStartBlock
;
2428 pbt
[i
] = pbt
[block
];
2429 pbt
[block
] = tempNode
;
2433 if ((k
== DeviceInfo
.wSpectraEndBlock
) && switched
)
2434 valid_block_table
= 0;
2437 if (!valid_block_table
) {
2438 memset(g_pBlockTable
, 0,
2439 DeviceInfo
.wDataBlockNum
* sizeof(u32
));
2440 memset(g_pWearCounter
, 0,
2441 DeviceInfo
.wDataBlockNum
* sizeof(u8
));
2442 if (DeviceInfo
.MLCDevice
)
2443 memset(g_pReadCounter
, 0,
2444 DeviceInfo
.wDataBlockNum
* sizeof(u16
));
2446 memset(g_pBTStartingCopy
, 0,
2447 DeviceInfo
.wDataBlockNum
* sizeof(u32
));
2448 memset(g_pWearCounterCopy
, 0,
2449 DeviceInfo
.wDataBlockNum
* sizeof(u8
));
2450 if (DeviceInfo
.MLCDevice
)
2451 memset(g_pReadCounterCopy
, 0,
2452 DeviceInfo
.wDataBlockNum
* sizeof(u16
));
2454 for (j
= DeviceInfo
.wSpectraStartBlock
, i
= 0;
2455 j
<= DeviceInfo
.wSpectraEndBlock
;
2457 if (GLOB_LLD_Get_Bad_Block((u32
)j
))
2458 pbt
[i
] = (u32
)(BAD_BLOCK
| j
);
2462 nand_dbg_print(NAND_DBG_WARN
, "Erasing all blocks in the NAND\n");
2464 for (j
= DeviceInfo
.wSpectraStartBlock
, i
= 0;
2465 j
<= DeviceInfo
.wSpectraEndBlock
;
2467 if ((pbt
[i
] & BAD_BLOCK
) != BAD_BLOCK
) {
2468 ret
= GLOB_LLD_Erase_Block(j
);
2471 MARK_BLOCK_AS_BAD(pbt
[i
]);
2472 nand_dbg_print(NAND_DBG_WARN
,
2473 "NAND Program fail in %s, Line %d, "
2474 "Function: %s, new Bad Block %d generated!\n",
2475 __FILE__
, __LINE__
, __func__
, (int)j
);
2477 pbt
[i
] = (u32
)(SPARE_BLOCK
| j
);
2481 pbtStartingCopy
[i
] = pbt
[i
];
2485 g_wBlockTableOffset
= 0;
2486 for (i
= 0; (i
<= (DeviceInfo
.wSpectraEndBlock
-
2487 DeviceInfo
.wSpectraStartBlock
))
2488 && ((pbt
[i
] & BAD_BLOCK
) == BAD_BLOCK
); i
++)
2490 if (i
> (DeviceInfo
.wSpectraEndBlock
- DeviceInfo
.wSpectraStartBlock
)) {
2491 printk(KERN_ERR
"All blocks bad!\n");
2494 g_wBlockTableIndex
= pbt
[i
] & ~BAD_BLOCK
;
2495 if (i
!= BLOCK_TABLE_INDEX
) {
2497 pbt
[i
] = pbt
[BLOCK_TABLE_INDEX
];
2498 pbt
[BLOCK_TABLE_INDEX
] = tempNode
;
2501 pbt
[BLOCK_TABLE_INDEX
] &= (~SPARE_BLOCK
);
2504 pbtStartingCopy
[BLOCK_TABLE_INDEX
] &= (~SPARE_BLOCK
);
2507 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
2508 memset(g_pBTBlocks
, 0xFF,
2509 (1 + LAST_BT_ID
- FIRST_BT_ID
) * sizeof(u32
));
2510 g_pBTBlocks
[FIRST_BT_ID
-FIRST_BT_ID
] = g_wBlockTableIndex
;
2511 FTL_Write_Block_Table(FAIL
);
2513 for (i
= 0; i
< CACHE_ITEM_NUM
; i
++) {
2514 Cache
.array
[i
].address
= NAND_CACHE_INIT_ADDR
;
2515 Cache
.array
[i
].use_cnt
= 0;
2516 Cache
.array
[i
].changed
= CLEAR
;
2519 #if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
2520 memcpy((void *)&cache_start_copy
, (void *)&Cache
,
2521 sizeof(struct flash_cache_tag
));
2526 static int force_format_nand(void)
2530 /* Force erase the whole unprotected physical partiton of NAND */
2531 printk(KERN_ALERT
"Start to force erase whole NAND device ...\n");
2532 printk(KERN_ALERT
"From phyical block %d to %d\n",
2533 DeviceInfo
.wSpectraStartBlock
, DeviceInfo
.wSpectraEndBlock
);
2534 for (i
= DeviceInfo
.wSpectraStartBlock
; i
<= DeviceInfo
.wSpectraEndBlock
; i
++) {
2535 if (GLOB_LLD_Erase_Block(i
))
2536 printk(KERN_ERR
"Failed to force erase NAND block %d\n", i
);
2538 printk(KERN_ALERT
"Force Erase ends. Please reboot the system ...\n");
/*
 * GLOB_FTL_Flash_Format - format (erase) the whole Spectra NAND partition.
 *
 * Currently this forces a raw erase of every unprotected physical block
 * via force_format_nand() rather than the block-table-aware
 * FTL_Format_Flash() path.
 *
 * Returns the status of force_format_nand().
 */
int GLOB_FTL_Flash_Format(void)
{
	return force_format_nand();
}
2551 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2552 * Function: FTL_Search_Block_Table_IN_Block
2553 * Inputs: Block Number
2555 * Outputs: PASS / FAIL
2556 * Page contatining the block table
2557 * Description: It searches the block table in the block
2558 * passed as an argument.
2560 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2561 static int FTL_Search_Block_Table_IN_Block(u32 BT_Block
,
2562 u8 BT_Tag
, u16
*Page
)
2569 u8
*tempbuf
= tmp_buf_search_bt_in_block
;
2570 u8
*pSpareBuf
= spare_buf_search_bt_in_block
;
2571 u8
*pSpareBufBTLastPage
= spare_buf_bt_search_bt_in_block
;
2572 u8 bt_flag_last_page
= 0xFF;
2573 u8 search_in_previous_pages
= 0;
2576 nand_dbg_print(NAND_DBG_DEBUG
, "%s, Line %d, Function: %s\n",
2577 __FILE__
, __LINE__
, __func__
);
2579 nand_dbg_print(NAND_DBG_DEBUG
,
2580 "Searching block table in %u block\n",
2581 (unsigned int)BT_Block
);
2583 bt_pages
= FTL_Get_Block_Table_Flash_Size_Pages();
2585 for (i
= bt_pages
; i
< DeviceInfo
.wPagesPerBlock
;
2586 i
+= (bt_pages
+ 1)) {
2587 nand_dbg_print(NAND_DBG_DEBUG
,
2588 "Searching last IPF: %d\n", i
);
2589 Result
= GLOB_LLD_Read_Page_Main_Polling(tempbuf
,
2592 if (0 == memcmp(tempbuf
, g_pIPF
, DeviceInfo
.wPageDataSize
)) {
2593 if ((i
+ bt_pages
+ 1) < DeviceInfo
.wPagesPerBlock
) {
2596 search_in_previous_pages
= 1;
2601 if (!search_in_previous_pages
) {
2602 if (i
!= bt_pages
) {
2603 i
-= (bt_pages
+ 1);
2611 if (!search_in_previous_pages
) {
2613 nand_dbg_print(NAND_DBG_DEBUG
,
2614 "Reading the spare area of Block %u Page %u",
2615 (unsigned int)BT_Block
, i
);
2616 Result
= GLOB_LLD_Read_Page_Spare(pSpareBuf
,
2618 nand_dbg_print(NAND_DBG_DEBUG
,
2619 "Reading the spare area of Block %u Page %u",
2620 (unsigned int)BT_Block
, i
+ bt_pages
- 1);
2621 Result
= GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage
,
2622 BT_Block
, i
+ bt_pages
- 1, 1);
2625 j
= FTL_Extract_Block_Table_Tag(pSpareBuf
, &tagarray
);
2627 for (; k
< j
; k
++) {
2628 if (tagarray
[k
] == BT_Tag
)
2634 bt_flag
= tagarray
[k
];
2638 if (Result
== PASS
) {
2640 j
= FTL_Extract_Block_Table_Tag(
2641 pSpareBufBTLastPage
, &tagarray
);
2643 for (; k
< j
; k
++) {
2644 if (tagarray
[k
] == BT_Tag
)
2650 bt_flag_last_page
= tagarray
[k
];
2654 if (Result
== PASS
) {
2655 if (bt_flag
== bt_flag_last_page
) {
2656 nand_dbg_print(NAND_DBG_DEBUG
,
2657 "Block table is found"
2658 " in page after IPF "
2664 g_cBlockTableStatus
=
2665 CURRENT_BLOCK_TABLE
;
2674 if (search_in_previous_pages
)
2677 i
= i
- (bt_pages
+ 1);
2681 nand_dbg_print(NAND_DBG_DEBUG
,
2682 "Reading the spare area of Block %d Page %d",
2685 Result
= GLOB_LLD_Read_Page_Spare(pSpareBuf
, BT_Block
, i
, 1);
2686 nand_dbg_print(NAND_DBG_DEBUG
,
2687 "Reading the spare area of Block %u Page %u",
2688 (unsigned int)BT_Block
, i
+ bt_pages
- 1);
2690 Result
= GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage
,
2691 BT_Block
, i
+ bt_pages
- 1, 1);
2694 j
= FTL_Extract_Block_Table_Tag(pSpareBuf
, &tagarray
);
2696 for (; k
< j
; k
++) {
2697 if (tagarray
[k
] == BT_Tag
)
2703 bt_flag
= tagarray
[k
];
2707 if (Result
== PASS
) {
2709 j
= FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage
,
2712 for (; k
< j
; k
++) {
2713 if (tagarray
[k
] == BT_Tag
)
2719 bt_flag_last_page
= tagarray
[k
];
2725 if (Result
== PASS
) {
2726 if (bt_flag
== bt_flag_last_page
) {
2727 nand_dbg_print(NAND_DBG_DEBUG
,
2728 "Block table is found "
2729 "in page prior to IPF "
2730 "at block %u page %d\n",
2731 (unsigned int)BT_Block
, i
);
2734 g_cBlockTableStatus
=
2735 IN_PROGRESS_BLOCK_TABLE
;
2745 if (Result
== FAIL
) {
2746 if ((Last_IPF
> bt_pages
) && (i
< Last_IPF
) && (!BT_Found
)) {
2748 *Page
= i
- (bt_pages
+ 1);
2750 if ((Last_IPF
== bt_pages
) && (i
< Last_IPF
) && (!BT_Found
))
2754 if (Last_IPF
== 0) {
2757 nand_dbg_print(NAND_DBG_DEBUG
, "Reading the spare area of "
2758 "Block %u Page %u", (unsigned int)BT_Block
, i
);
2760 Result
= GLOB_LLD_Read_Page_Spare(pSpareBuf
, BT_Block
, i
, 1);
2761 nand_dbg_print(NAND_DBG_DEBUG
,
2762 "Reading the spare area of Block %u Page %u",
2763 (unsigned int)BT_Block
, i
+ bt_pages
- 1);
2764 Result
= GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage
,
2765 BT_Block
, i
+ bt_pages
- 1, 1);
2768 j
= FTL_Extract_Block_Table_Tag(pSpareBuf
, &tagarray
);
2770 for (; k
< j
; k
++) {
2771 if (tagarray
[k
] == BT_Tag
)
2777 bt_flag
= tagarray
[k
];
2781 if (Result
== PASS
) {
2783 j
= FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage
,
2786 for (; k
< j
; k
++) {
2787 if (tagarray
[k
] == BT_Tag
)
2793 bt_flag_last_page
= tagarray
[k
];
2797 if (Result
== PASS
) {
2798 if (bt_flag
== bt_flag_last_page
) {
2799 nand_dbg_print(NAND_DBG_DEBUG
,
2800 "Block table is found "
2801 "in page after IPF at "
2802 "block %u page %u\n",
2803 (unsigned int)BT_Block
,
2807 g_cBlockTableStatus
=
2808 CURRENT_BLOCK_TABLE
;
2823 u8
*get_blk_table_start_addr(void)
2825 return g_pBlockTable
;
2828 unsigned long get_blk_table_len(void)
2830 return DeviceInfo
.wDataBlockNum
* sizeof(u32
);
2833 u8
*get_wear_leveling_table_start_addr(void)
2835 return g_pWearCounter
;
2838 unsigned long get_wear_leveling_table_len(void)
2840 return DeviceInfo
.wDataBlockNum
* sizeof(u8
);
2843 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2844 * Function: FTL_Read_Block_Table
2846 * Outputs: PASS / FAIL
2847 * Description: read the flash spare area and find a block containing the
2848 * most recent block table(having largest block_table_counter).
2849 * Find the last written Block table in this block.
2850 * Check the correctness of Block Table
2851 * If CDMA is enabled, this function is called in
2853 * We don't need to store changes in Block table in this
2854 * function as it is called only at initialization
2856 * Note: Currently this function is called at initialization
2857 * before any read/erase/write command issued to flash so,
2858 * there is no need to wait for CDMA list to complete as of now
2859 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2860 static int FTL_Read_Block_Table(void)
2864 u8
*tempBuf
, *tagarray
;
2867 u8 block_table_found
= 0;
2873 int wBytesCopied
= 0, tempvar
;
2875 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
2876 __FILE__
, __LINE__
, __func__
);
2878 tempBuf
= tmp_buf1_read_blk_table
;
2879 bt_pages
= FTL_Get_Block_Table_Flash_Size_Pages();
2881 for (j
= DeviceInfo
.wSpectraStartBlock
;
2882 j
<= (int)DeviceInfo
.wSpectraEndBlock
;
2884 status
= GLOB_LLD_Read_Page_Spare(tempBuf
, j
, 0, 1);
2886 i
= FTL_Extract_Block_Table_Tag(tempBuf
, &tagarray
);
2888 status
= GLOB_LLD_Read_Page_Main_Polling(tempBuf
,
2890 for (; k
< i
; k
++) {
2891 if (tagarray
[k
] == tempBuf
[3])
2901 nand_dbg_print(NAND_DBG_DEBUG
,
2902 "Block table is contained in Block %d %d\n",
2903 (unsigned int)j
, (unsigned int)k
);
2905 if (g_pBTBlocks
[k
-FIRST_BT_ID
] == BTBLOCK_INVAL
) {
2906 g_pBTBlocks
[k
-FIRST_BT_ID
] = j
;
2907 block_table_found
= 1;
2909 printk(KERN_ERR
"FTL_Read_Block_Table -"
2910 "This should never happens. "
2911 "Two block table have same counter %u!\n", k
);
2915 if (block_table_found
) {
2916 if (g_pBTBlocks
[FIRST_BT_ID
- FIRST_BT_ID
] != BTBLOCK_INVAL
&&
2917 g_pBTBlocks
[LAST_BT_ID
- FIRST_BT_ID
] != BTBLOCK_INVAL
) {
2919 while ((j
> FIRST_BT_ID
) &&
2920 (g_pBTBlocks
[j
- FIRST_BT_ID
] != BTBLOCK_INVAL
))
2922 if (j
== FIRST_BT_ID
) {
2924 last_erased
= LAST_BT_ID
;
2926 last_erased
= (u8
)j
+ 1;
2927 while ((j
> FIRST_BT_ID
) && (BTBLOCK_INVAL
==
2928 g_pBTBlocks
[j
- FIRST_BT_ID
]))
2933 while (g_pBTBlocks
[j
- FIRST_BT_ID
] == BTBLOCK_INVAL
)
2935 last_erased
= (u8
)j
;
2936 while ((j
< LAST_BT_ID
) && (BTBLOCK_INVAL
!=
2937 g_pBTBlocks
[j
- FIRST_BT_ID
]))
2939 if (g_pBTBlocks
[j
-FIRST_BT_ID
] == BTBLOCK_INVAL
)
2943 if (last_erased
> j
)
2944 j
+= (1 + LAST_BT_ID
- FIRST_BT_ID
);
2946 for (; (j
>= last_erased
) && (FAIL
== wResult
); j
--) {
2947 i
= (j
- FIRST_BT_ID
) %
2948 (1 + LAST_BT_ID
- FIRST_BT_ID
);
2950 FTL_Search_Block_Table_IN_Block(g_pBTBlocks
[i
],
2951 i
+ FIRST_BT_ID
, &Page
);
2952 if (g_cBlockTableStatus
== IN_PROGRESS_BLOCK_TABLE
)
2953 block_table_found
= 0;
2955 while ((search_result
== PASS
) && (FAIL
== wResult
)) {
2956 nand_dbg_print(NAND_DBG_DEBUG
,
2957 "FTL_Read_Block_Table:"
2958 "Block: %u Page: %u "
2959 "contains block table\n",
2960 (unsigned int)g_pBTBlocks
[i
],
2961 (unsigned int)Page
);
2963 tempBuf
= tmp_buf2_read_blk_table
;
2965 for (k
= 0; k
< bt_pages
; k
++) {
2966 Block
= g_pBTBlocks
[i
];
2970 GLOB_LLD_Read_Page_Main_Polling(
2971 tempBuf
, Block
, Page
, PageCount
);
2973 tempvar
= k
? 0 : 4;
2976 FTL_Copy_Block_Table_From_Flash(
2978 DeviceInfo
.wPageDataSize
- tempvar
,
2984 wResult
= FTL_Check_Block_Table(FAIL
);
2985 if (FAIL
== wResult
) {
2986 block_table_found
= 0;
2987 if (Page
> bt_pages
)
2988 Page
-= ((bt_pages
<<1) + 1);
2990 search_result
= FAIL
;
2996 if (PASS
== wResult
) {
2997 if (!block_table_found
)
2998 FTL_Execute_SPL_Recovery();
3000 if (g_cBlockTableStatus
== IN_PROGRESS_BLOCK_TABLE
)
3001 g_wBlockTableOffset
= (u16
)Page
+ 1;
3003 g_wBlockTableOffset
= (u16
)Page
- bt_pages
;
3005 g_wBlockTableIndex
= (u32
)g_pBTBlocks
[i
];
3008 if (DeviceInfo
.MLCDevice
)
3009 memcpy(g_pBTStartingCopy
, g_pBlockTable
,
3010 DeviceInfo
.wDataBlockNum
* sizeof(u32
)
3011 + DeviceInfo
.wDataBlockNum
* sizeof(u8
)
3012 + DeviceInfo
.wDataBlockNum
* sizeof(u16
));
3014 memcpy(g_pBTStartingCopy
, g_pBlockTable
,
3015 DeviceInfo
.wDataBlockNum
* sizeof(u32
)
3016 + DeviceInfo
.wDataBlockNum
* sizeof(u8
));
3020 if (FAIL
== wResult
)
3021 printk(KERN_ERR
"Yunpeng - "
3022 "Can not find valid spectra block table!\n");
3024 #if AUTO_FORMAT_FLASH
3025 if (FAIL
== wResult
) {
3026 nand_dbg_print(NAND_DBG_DEBUG
, "doing auto-format\n");
3027 wResult
= FTL_Format_Flash(0);
3035 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3036 * Function: FTL_Flash_Error_Handle
3037 * Inputs: Pointer to data
3040 * Outputs: PASS=0 / FAIL=1
3041 * Description: It handles any error occured during Spectra operation
3042 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3043 static int FTL_Flash_Error_Handle(u8
*pData
, u64 old_page_addr
,
3048 u32 tmp_node
, blk_node
= BLK_FROM_ADDR(blk_addr
);
3052 u32
*pbt
= (u32
*)g_pBlockTable
;
3054 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3055 __FILE__
, __LINE__
, __func__
);
3057 if (ERR
== GLOB_FTL_Garbage_Collection())
3061 for (i
= DeviceInfo
.wSpectraEndBlock
-
3062 DeviceInfo
.wSpectraStartBlock
;
3064 if (IS_SPARE_BLOCK(i
)) {
3065 tmp_node
= (u32
)(BAD_BLOCK
|
3067 pbt
[blk_node
] = (u32
)(pbt
[i
] &
3071 p_BTableChangesDelta
=
3072 (struct BTableChangesDelta
*)
3075 sizeof(struct BTableChangesDelta
);
3077 p_BTableChangesDelta
->ftl_cmd_cnt
=
3079 p_BTableChangesDelta
->BT_Index
=
3081 p_BTableChangesDelta
->BT_Entry_Value
=
3083 p_BTableChangesDelta
->ValidFields
= 0x0C;
3085 p_BTableChangesDelta
=
3086 (struct BTableChangesDelta
*)
3089 sizeof(struct BTableChangesDelta
);
3091 p_BTableChangesDelta
->ftl_cmd_cnt
=
3093 p_BTableChangesDelta
->BT_Index
= i
;
3094 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[i
];
3095 p_BTableChangesDelta
->ValidFields
= 0x0C;
3102 if (FAIL
== wResult
) {
3103 if (FAIL
== GLOB_FTL_Garbage_Collection())
3109 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
) {
3110 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
3111 FTL_Write_IN_Progress_Block_Table_Page();
3114 phy_addr
= FTL_Get_Physical_Block_Addr(blk_addr
);
3116 for (j
= 0; j
< RETRY_TIMES
; j
++) {
3117 if (PASS
== wErase
) {
3118 if (FAIL
== GLOB_FTL_Block_Erase(phy_addr
)) {
3119 MARK_BLOCK_AS_BAD(pbt
[blk_node
]);
3123 if (PASS
== FTL_Cache_Update_Block(pData
,
3133 } while (FAIL
== wResult
);
3135 FTL_Write_Block_Table(FAIL
);
3140 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3141 * Function: FTL_Get_Page_Num
3142 * Inputs: Size in bytes
3143 * Outputs: Size in pages
3144 * Description: It calculates the pages required for the length passed
3145 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3146 static u32
FTL_Get_Page_Num(u64 length
)
3148 return (u32
)((length
>> DeviceInfo
.nBitsInPageDataSize
) +
3149 (GLOB_u64_Remainder(length
, 1) > 0 ? 1 : 0));
3152 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3153 * Function: FTL_Get_Physical_Block_Addr
3154 * Inputs: Block Address (byte format)
3155 * Outputs: Physical address of the block.
3156 * Description: It translates LBA to PBA by returning address stored
3157 * at the LBA location in the block table
3158 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3159 static u64
FTL_Get_Physical_Block_Addr(u64 logical_addr
)
3164 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3165 __FILE__
, __LINE__
, __func__
);
3167 pbt
= (u32
*)g_pBlockTable
;
3168 physical_addr
= (u64
) DeviceInfo
.wBlockDataSize
*
3169 (pbt
[BLK_FROM_ADDR(logical_addr
)] & (~BAD_BLOCK
));
3171 return physical_addr
;
3174 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3175 * Function: FTL_Get_Block_Index
3176 * Inputs: Physical Block no.
3177 * Outputs: Logical block no. /BAD_BLOCK
3178 * Description: It returns the logical block no. for the PBA passed
3179 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3180 static u32
FTL_Get_Block_Index(u32 wBlockNum
)
3182 u32
*pbt
= (u32
*)g_pBlockTable
;
3185 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3186 __FILE__
, __LINE__
, __func__
);
3188 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++)
3189 if (wBlockNum
== (pbt
[i
] & (~BAD_BLOCK
)))
3195 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3196 * Function: GLOB_FTL_Wear_Leveling
3199 * Description: This is static wear leveling (done by explicit call)
3200 * do complete static wear leveling
3201 * do complete garbage collection
3202 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3203 int GLOB_FTL_Wear_Leveling(void)
3205 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
3206 __FILE__
, __LINE__
, __func__
);
3208 FTL_Static_Wear_Leveling();
3209 GLOB_FTL_Garbage_Collection();
3214 static void find_least_most_worn(u8
*chg
,
3215 u32
*least_idx
, u8
*least_cnt
,
3216 u32
*most_idx
, u8
*most_cnt
)
3218 u32
*pbt
= (u32
*)g_pBlockTable
;
3223 for (i
= BLOCK_TABLE_INDEX
+ 1; i
< DeviceInfo
.wDataBlockNum
; i
++) {
3224 if (IS_BAD_BLOCK(i
) || PASS
== chg
[i
])
3227 idx
= (u32
) ((~BAD_BLOCK
) & pbt
[i
]);
3228 cnt
= g_pWearCounter
[idx
- DeviceInfo
.wSpectraStartBlock
];
3230 if (IS_SPARE_BLOCK(i
)) {
3231 if (cnt
> *most_cnt
) {
3237 if (IS_DATA_BLOCK(i
)) {
3238 if (cnt
< *least_cnt
) {
3244 if (PASS
== chg
[*most_idx
] || PASS
== chg
[*least_idx
]) {
3245 debug_boundary_error(*most_idx
,
3246 DeviceInfo
.wDataBlockNum
, 0);
3247 debug_boundary_error(*least_idx
,
3248 DeviceInfo
.wDataBlockNum
, 0);
3254 static int move_blks_for_wear_leveling(u8
*chg
,
3255 u32
*least_idx
, u32
*rep_blk_num
, int *result
)
3257 u32
*pbt
= (u32
*)g_pBlockTable
;
3259 int j
, ret_cp_blk
, ret_erase
;
3262 chg
[*least_idx
] = PASS
;
3263 debug_boundary_error(*least_idx
, DeviceInfo
.wDataBlockNum
, 0);
3265 rep_blk
= FTL_Replace_MWBlock();
3266 if (rep_blk
!= BAD_BLOCK
) {
3267 nand_dbg_print(NAND_DBG_DEBUG
,
3268 "More than two spare blocks exist so do it\n");
3269 nand_dbg_print(NAND_DBG_DEBUG
, "Block Replaced is %d\n",
3272 chg
[rep_blk
] = PASS
;
3274 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
) {
3275 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
3276 FTL_Write_IN_Progress_Block_Table_Page();
3279 for (j
= 0; j
< RETRY_TIMES
; j
++) {
3280 ret_cp_blk
= FTL_Copy_Block((u64
)(*least_idx
) *
3281 DeviceInfo
.wBlockDataSize
,
3282 (u64
)rep_blk
* DeviceInfo
.wBlockDataSize
);
3283 if (FAIL
== ret_cp_blk
) {
3284 ret_erase
= GLOB_FTL_Block_Erase((u64
)rep_blk
3285 * DeviceInfo
.wBlockDataSize
);
3286 if (FAIL
== ret_erase
)
3287 MARK_BLOCK_AS_BAD(pbt
[rep_blk
]);
3289 nand_dbg_print(NAND_DBG_DEBUG
,
3290 "FTL_Copy_Block == OK\n");
3295 if (j
< RETRY_TIMES
) {
3297 u32 old_idx
= FTL_Get_Block_Index(*least_idx
);
3298 u32 rep_idx
= FTL_Get_Block_Index(rep_blk
);
3299 tmp
= (u32
)(DISCARD_BLOCK
| pbt
[old_idx
]);
3300 pbt
[old_idx
] = (u32
)((~SPARE_BLOCK
) &
3304 p_BTableChangesDelta
= (struct BTableChangesDelta
*)
3306 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3307 p_BTableChangesDelta
->ftl_cmd_cnt
=
3309 p_BTableChangesDelta
->BT_Index
= old_idx
;
3310 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[old_idx
];
3311 p_BTableChangesDelta
->ValidFields
= 0x0C;
3313 p_BTableChangesDelta
= (struct BTableChangesDelta
*)
3315 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3317 p_BTableChangesDelta
->ftl_cmd_cnt
=
3319 p_BTableChangesDelta
->BT_Index
= rep_idx
;
3320 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[rep_idx
];
3321 p_BTableChangesDelta
->ValidFields
= 0x0C;
3324 pbt
[FTL_Get_Block_Index(rep_blk
)] |= BAD_BLOCK
;
3326 p_BTableChangesDelta
= (struct BTableChangesDelta
*)
3328 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3330 p_BTableChangesDelta
->ftl_cmd_cnt
=
3332 p_BTableChangesDelta
->BT_Index
=
3333 FTL_Get_Block_Index(rep_blk
);
3334 p_BTableChangesDelta
->BT_Entry_Value
=
3335 pbt
[FTL_Get_Block_Index(rep_blk
)];
3336 p_BTableChangesDelta
->ValidFields
= 0x0C;
3342 if (((*rep_blk_num
)++) > WEAR_LEVELING_BLOCK_NUM
)
3345 printk(KERN_ERR
"Less than 3 spare blocks exist so quit\n");
3352 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3353 * Function: FTL_Static_Wear_Leveling
3355 * Outputs: PASS=0 / FAIL=1
3356 * Description: This is static wear leveling (done by explicit call)
3357 * search for most&least used
3358 * if difference < GATE:
3359 * update the block table with exhange
3360 * mark block table in flash as IN_PROGRESS
3362 * the caller should handle GC clean up after calling this function
3363 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3364 int FTL_Static_Wear_Leveling(void)
3372 u32 replaced_blks
= 0;
3373 u8
*chang_flag
= flags_static_wear_leveling
;
3375 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
3376 __FILE__
, __LINE__
, __func__
);
3381 memset(chang_flag
, FAIL
, DeviceInfo
.wDataBlockNum
);
3382 while (go_on
== PASS
) {
3383 nand_dbg_print(NAND_DBG_DEBUG
,
3384 "starting static wear leveling\n");
3386 least_worn_cnt
= 0xFF;
3387 least_worn_idx
= BLOCK_TABLE_INDEX
;
3388 most_worn_idx
= BLOCK_TABLE_INDEX
;
3390 find_least_most_worn(chang_flag
, &least_worn_idx
,
3391 &least_worn_cnt
, &most_worn_idx
, &most_worn_cnt
);
3393 nand_dbg_print(NAND_DBG_DEBUG
,
3394 "Used and least worn is block %u, whos count is %u\n",
3395 (unsigned int)least_worn_idx
,
3396 (unsigned int)least_worn_cnt
);
3398 nand_dbg_print(NAND_DBG_DEBUG
,
3399 "Free and most worn is block %u, whos count is %u\n",
3400 (unsigned int)most_worn_idx
,
3401 (unsigned int)most_worn_cnt
);
3403 if ((most_worn_cnt
> least_worn_cnt
) &&
3404 (most_worn_cnt
- least_worn_cnt
> WEAR_LEVELING_GATE
))
3405 go_on
= move_blks_for_wear_leveling(chang_flag
,
3406 &least_worn_idx
, &replaced_blks
, &result
);
3415 static int do_garbage_collection(u32 discard_cnt
)
3417 u32
*pbt
= (u32
*)g_pBlockTable
;
3419 u8 bt_block_erased
= 0;
3420 int i
, cnt
, ret
= FAIL
;
3424 while ((i
< DeviceInfo
.wDataBlockNum
) && (discard_cnt
> 0) &&
3425 ((ftl_cmd_cnt
+ 28) < 256)) {
3426 if (((pbt
[i
] & BAD_BLOCK
) != BAD_BLOCK
) &&
3427 (pbt
[i
] & DISCARD_BLOCK
)) {
3428 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
) {
3429 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
3430 FTL_Write_IN_Progress_Block_Table_Page();
3433 addr
= FTL_Get_Physical_Block_Addr((u64
)i
*
3434 DeviceInfo
.wBlockDataSize
);
3435 pba
= BLK_FROM_ADDR(addr
);
3437 for (cnt
= FIRST_BT_ID
; cnt
<= LAST_BT_ID
; cnt
++) {
3438 if (pba
== g_pBTBlocks
[cnt
- FIRST_BT_ID
]) {
3439 nand_dbg_print(NAND_DBG_DEBUG
,
3440 "GC will erase BT block %u\n",
3444 bt_block_erased
= 1;
3449 if (bt_block_erased
) {
3450 bt_block_erased
= 0;
3454 addr
= FTL_Get_Physical_Block_Addr((u64
)i
*
3455 DeviceInfo
.wBlockDataSize
);
3457 if (PASS
== GLOB_FTL_Block_Erase(addr
)) {
3458 pbt
[i
] &= (u32
)(~DISCARD_BLOCK
);
3459 pbt
[i
] |= (u32
)(SPARE_BLOCK
);
3460 p_BTableChangesDelta
=
3461 (struct BTableChangesDelta
*)
3464 sizeof(struct BTableChangesDelta
);
3465 p_BTableChangesDelta
->ftl_cmd_cnt
=
3467 p_BTableChangesDelta
->BT_Index
= i
;
3468 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[i
];
3469 p_BTableChangesDelta
->ValidFields
= 0x0C;
3473 MARK_BLOCK_AS_BAD(pbt
[i
]);
3484 static int do_garbage_collection(u32 discard_cnt
)
3486 u32
*pbt
= (u32
*)g_pBlockTable
;
3488 u8 bt_block_erased
= 0;
3489 int i
, cnt
, ret
= FAIL
;
3493 while ((i
< DeviceInfo
.wDataBlockNum
) && (discard_cnt
> 0)) {
3494 if (((pbt
[i
] & BAD_BLOCK
) != BAD_BLOCK
) &&
3495 (pbt
[i
] & DISCARD_BLOCK
)) {
3496 if (IN_PROGRESS_BLOCK_TABLE
!= g_cBlockTableStatus
) {
3497 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
3498 FTL_Write_IN_Progress_Block_Table_Page();
3501 addr
= FTL_Get_Physical_Block_Addr((u64
)i
*
3502 DeviceInfo
.wBlockDataSize
);
3503 pba
= BLK_FROM_ADDR(addr
);
3505 for (cnt
= FIRST_BT_ID
; cnt
<= LAST_BT_ID
; cnt
++) {
3506 if (pba
== g_pBTBlocks
[cnt
- FIRST_BT_ID
]) {
3507 nand_dbg_print(NAND_DBG_DEBUG
,
3508 "GC will erase BT block %d\n",
3512 bt_block_erased
= 1;
3517 if (bt_block_erased
) {
3518 bt_block_erased
= 0;
3522 /* If the discard block is L2 cache block, then just skip it */
3523 for (cnt
= 0; cnt
< BLK_NUM_FOR_L2_CACHE
; cnt
++) {
3524 if (cache_l2
.blk_array
[cnt
] == pba
) {
3525 nand_dbg_print(NAND_DBG_DEBUG
,
3526 "GC will erase L2 cache blk %d\n",
3531 if (cnt
< BLK_NUM_FOR_L2_CACHE
) { /* Skip it */
3537 addr
= FTL_Get_Physical_Block_Addr((u64
)i
*
3538 DeviceInfo
.wBlockDataSize
);
3540 if (PASS
== GLOB_FTL_Block_Erase(addr
)) {
3541 pbt
[i
] &= (u32
)(~DISCARD_BLOCK
);
3542 pbt
[i
] |= (u32
)(SPARE_BLOCK
);
3546 MARK_BLOCK_AS_BAD(pbt
[i
]);
3557 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3558 * Function: GLOB_FTL_Garbage_Collection
3560 * Outputs: PASS / FAIL (returns the number of un-erased blocks
3561 * Description: search the block table for all discarded blocks to erase
3562 * for each discarded block:
3563 * set the flash block to IN_PROGRESS
3565 * update the block table
3566 * write the block table to flash
3567 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3568 int GLOB_FTL_Garbage_Collection(void)
3573 u32
*pbt
= (u32
*)g_pBlockTable
;
3575 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
3576 __FILE__
, __LINE__
, __func__
);
3579 printk(KERN_ALERT
"GLOB_FTL_Garbage_Collection() "
3580 "has been re-entered! Exit.\n");
3586 GLOB_FTL_BT_Garbage_Collection();
3588 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
3589 if (IS_DISCARDED_BLOCK(i
))
3593 if (wDiscard
<= 0) {
3598 nand_dbg_print(NAND_DBG_DEBUG
,
3599 "Found %d discarded blocks\n", wDiscard
);
3601 FTL_Write_Block_Table(FAIL
);
3603 wResult
= do_garbage_collection(wDiscard
);
3605 FTL_Write_Block_Table(FAIL
);
3614 static int do_bt_garbage_collection(void)
3617 u32
*pbt
= (u32
*)g_pBlockTable
;
3618 u32
*pBTBlocksNode
= (u32
*)g_pBTBlocks
;
3622 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3623 __FILE__
, __LINE__
, __func__
);
3630 for (i
= last_erased
; (i
<= LAST_BT_ID
) &&
3631 (g_pBTBlocks
[((i
+ 2) % (1 + LAST_BT_ID
- FIRST_BT_ID
)) +
3632 FIRST_BT_ID
- FIRST_BT_ID
] != BTBLOCK_INVAL
) &&
3633 ((ftl_cmd_cnt
+ 28)) < 256; i
++) {
3634 pba
= pBTBlocksNode
[i
- FIRST_BT_ID
];
3635 lba
= FTL_Get_Block_Index(pba
);
3636 nand_dbg_print(NAND_DBG_DEBUG
,
3637 "do_bt_garbage_collection: pba %d, lba %d\n",
3639 nand_dbg_print(NAND_DBG_DEBUG
,
3640 "Block Table Entry: %d", pbt
[lba
]);
3642 if (((pbt
[lba
] & BAD_BLOCK
) != BAD_BLOCK
) &&
3643 (pbt
[lba
] & DISCARD_BLOCK
)) {
3644 nand_dbg_print(NAND_DBG_DEBUG
,
3645 "do_bt_garbage_collection_cdma: "
3646 "Erasing Block tables present in block %d\n",
3648 addr
= FTL_Get_Physical_Block_Addr((u64
)lba
*
3649 DeviceInfo
.wBlockDataSize
);
3650 if (PASS
== GLOB_FTL_Block_Erase(addr
)) {
3651 pbt
[lba
] &= (u32
)(~DISCARD_BLOCK
);
3652 pbt
[lba
] |= (u32
)(SPARE_BLOCK
);
3654 p_BTableChangesDelta
=
3655 (struct BTableChangesDelta
*)
3658 sizeof(struct BTableChangesDelta
);
3660 p_BTableChangesDelta
->ftl_cmd_cnt
=
3662 p_BTableChangesDelta
->BT_Index
= lba
;
3663 p_BTableChangesDelta
->BT_Entry_Value
=
3666 p_BTableChangesDelta
->ValidFields
= 0x0C;
3669 pBTBlocksNode
[last_erased
- FIRST_BT_ID
] =
3671 nand_dbg_print(NAND_DBG_DEBUG
,
3672 "resetting bt entry at index %d "
3674 pBTBlocksNode
[i
- FIRST_BT_ID
]);
3675 if (last_erased
== LAST_BT_ID
)
3676 last_erased
= FIRST_BT_ID
;
3680 MARK_BLOCK_AS_BAD(pbt
[lba
]);
3691 static int do_bt_garbage_collection(void)
3694 u32
*pbt
= (u32
*)g_pBlockTable
;
3695 u32
*pBTBlocksNode
= (u32
*)g_pBTBlocks
;
3699 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3700 __FILE__
, __LINE__
, __func__
);
3707 for (i
= last_erased
; (i
<= LAST_BT_ID
) &&
3708 (g_pBTBlocks
[((i
+ 2) % (1 + LAST_BT_ID
- FIRST_BT_ID
)) +
3709 FIRST_BT_ID
- FIRST_BT_ID
] != BTBLOCK_INVAL
); i
++) {
3710 pba
= pBTBlocksNode
[i
- FIRST_BT_ID
];
3711 lba
= FTL_Get_Block_Index(pba
);
3712 nand_dbg_print(NAND_DBG_DEBUG
,
3713 "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
3715 nand_dbg_print(NAND_DBG_DEBUG
,
3716 "Block Table Entry: %d", pbt
[lba
]);
3718 if (((pbt
[lba
] & BAD_BLOCK
) != BAD_BLOCK
) &&
3719 (pbt
[lba
] & DISCARD_BLOCK
)) {
3720 nand_dbg_print(NAND_DBG_DEBUG
,
3721 "do_bt_garbage_collection: "
3722 "Erasing Block tables present in block %d\n",
3724 addr
= FTL_Get_Physical_Block_Addr((u64
)lba
*
3725 DeviceInfo
.wBlockDataSize
);
3726 if (PASS
== GLOB_FTL_Block_Erase(addr
)) {
3727 pbt
[lba
] &= (u32
)(~DISCARD_BLOCK
);
3728 pbt
[lba
] |= (u32
)(SPARE_BLOCK
);
3730 pBTBlocksNode
[last_erased
- FIRST_BT_ID
] =
3732 nand_dbg_print(NAND_DBG_DEBUG
,
3733 "resetting bt entry at index %d "
3735 pBTBlocksNode
[i
- FIRST_BT_ID
]);
3736 if (last_erased
== LAST_BT_ID
)
3737 last_erased
= FIRST_BT_ID
;
3741 MARK_BLOCK_AS_BAD(pbt
[lba
]);
3753 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3754 * Function: GLOB_FTL_BT_Garbage_Collection
3756 * Outputs: PASS / FAIL (returns the number of un-erased blocks
3757 * Description: Erases discarded blocks containing Block table
3759 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3760 int GLOB_FTL_BT_Garbage_Collection(void)
3762 return do_bt_garbage_collection();
3765 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3766 * Function: FTL_Replace_OneBlock
3767 * Inputs: Block number 1
3769 * Outputs: Replaced Block Number
3770 * Description: Interchange block table entries at wBlockNum and wReplaceNum
3772 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3773 static u32
FTL_Replace_OneBlock(u32 blk
, u32 rep_blk
)
3776 u32 replace_node
= BAD_BLOCK
;
3777 u32
*pbt
= (u32
*)g_pBlockTable
;
3779 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3780 __FILE__
, __LINE__
, __func__
);
3782 if (rep_blk
!= BAD_BLOCK
) {
3783 if (IS_BAD_BLOCK(blk
))
3786 tmp_blk
= DISCARD_BLOCK
| (~SPARE_BLOCK
& pbt
[blk
]);
3788 replace_node
= (u32
) ((~SPARE_BLOCK
) & pbt
[rep_blk
]);
3789 pbt
[blk
] = replace_node
;
3790 pbt
[rep_blk
] = tmp_blk
;
3793 p_BTableChangesDelta
=
3794 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
3795 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3797 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
3798 p_BTableChangesDelta
->BT_Index
= blk
;
3799 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[blk
];
3801 p_BTableChangesDelta
->ValidFields
= 0x0C;
3803 p_BTableChangesDelta
=
3804 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
3805 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
3807 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
3808 p_BTableChangesDelta
->BT_Index
= rep_blk
;
3809 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[rep_blk
];
3810 p_BTableChangesDelta
->ValidFields
= 0x0C;
3814 return replace_node
;
3817 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3818 * Function: FTL_Write_Block_Table_Data
3819 * Inputs: Block table size in pages
3820 * Outputs: PASS=0 / FAIL=1
3821 * Description: Write block table data in flash
3822 * If first page and last page
3823 * Write data+BT flag
3826 * BT flag is a counter. Its value is incremented for block table
3827 * write in a new Block
3828 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3829 static int FTL_Write_Block_Table_Data(void)
3831 u64 dwBlockTableAddr
, pTempAddr
;
3833 u16 Page
, PageCount
;
3834 u8
*tempBuf
= tmp_buf_write_blk_table_data
;
3838 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
3839 __FILE__
, __LINE__
, __func__
);
3842 (u64
)((u64
)g_wBlockTableIndex
* DeviceInfo
.wBlockDataSize
+
3843 (u64
)g_wBlockTableOffset
* DeviceInfo
.wPageDataSize
);
3844 pTempAddr
= dwBlockTableAddr
;
3846 bt_pages
= FTL_Get_Block_Table_Flash_Size_Pages();
3848 nand_dbg_print(NAND_DBG_DEBUG
, "FTL_Write_Block_Table_Data: "
3849 "page= %d BlockTableIndex= %d "
3850 "BlockTableOffset=%d\n", bt_pages
,
3851 g_wBlockTableIndex
, g_wBlockTableOffset
);
3853 Block
= BLK_FROM_ADDR(pTempAddr
);
3854 Page
= PAGE_FROM_ADDR(pTempAddr
, Block
);
3857 if (bt_block_changed
) {
3858 if (bt_flag
== LAST_BT_ID
) {
3859 bt_flag
= FIRST_BT_ID
;
3860 g_pBTBlocks
[bt_flag
- FIRST_BT_ID
] = Block
;
3861 } else if (bt_flag
< LAST_BT_ID
) {
3863 g_pBTBlocks
[bt_flag
- FIRST_BT_ID
] = Block
;
3866 if ((bt_flag
> (LAST_BT_ID
-4)) &&
3867 g_pBTBlocks
[FIRST_BT_ID
- FIRST_BT_ID
] !=
3869 bt_block_changed
= 0;
3870 GLOB_FTL_BT_Garbage_Collection();
3873 bt_block_changed
= 0;
3874 nand_dbg_print(NAND_DBG_DEBUG
,
3875 "Block Table Counter is %u Block %u\n",
3876 bt_flag
, (unsigned int)Block
);
3879 memset(tempBuf
, 0, 3);
3880 tempBuf
[3] = bt_flag
;
3881 wBytesCopied
= FTL_Copy_Block_Table_To_Flash(tempBuf
+ 4,
3882 DeviceInfo
.wPageDataSize
- 4, 0);
3883 memset(&tempBuf
[wBytesCopied
+ 4], 0xff,
3884 DeviceInfo
.wPageSize
- (wBytesCopied
+ 4));
3885 FTL_Insert_Block_Table_Signature(&tempBuf
[DeviceInfo
.wPageDataSize
],
3889 memcpy(g_pNextBlockTable
, tempBuf
,
3890 DeviceInfo
.wPageSize
* sizeof(u8
));
3891 nand_dbg_print(NAND_DBG_DEBUG
, "Writing First Page of Block Table "
3892 "Block %u Page %u\n", (unsigned int)Block
, Page
);
3893 if (FAIL
== GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable
,
3895 LLD_CMD_FLAG_MODE_CDMA
| LLD_CMD_FLAG_ORDER_BEFORE_REST
)) {
3896 nand_dbg_print(NAND_DBG_WARN
, "NAND Program fail in "
3897 "%s, Line %d, Function: %s, "
3898 "new Bad Block %d generated!\n",
3899 __FILE__
, __LINE__
, __func__
, Block
);
3904 g_pNextBlockTable
+= ((DeviceInfo
.wPageSize
* sizeof(u8
)));
3906 if (FAIL
== GLOB_LLD_Write_Page_Main_Spare(tempBuf
, Block
, Page
, 1)) {
3907 nand_dbg_print(NAND_DBG_WARN
,
3908 "NAND Program fail in %s, Line %d, Function: %s, "
3909 "new Bad Block %d generated!\n",
3910 __FILE__
, __LINE__
, __func__
, Block
);
3916 PageCount
= bt_pages
- 1;
3917 if (PageCount
> 1) {
3918 wBytesCopied
+= FTL_Copy_Block_Table_To_Flash(tempBuf
,
3919 DeviceInfo
.wPageDataSize
* (PageCount
- 1),
3923 memcpy(g_pNextBlockTable
, tempBuf
,
3924 (PageCount
- 1) * DeviceInfo
.wPageDataSize
);
3925 if (FAIL
== GLOB_LLD_Write_Page_Main_cdma(
3926 g_pNextBlockTable
, Block
, Page
+ 1,
3928 nand_dbg_print(NAND_DBG_WARN
,
3929 "NAND Program fail in %s, Line %d, "
3931 "new Bad Block %d generated!\n",
3932 __FILE__
, __LINE__
, __func__
,
3938 g_pNextBlockTable
+= (PageCount
- 1) *
3939 DeviceInfo
.wPageDataSize
* sizeof(u8
);
3941 if (FAIL
== GLOB_LLD_Write_Page_Main(tempBuf
,
3942 Block
, Page
+ 1, PageCount
- 1)) {
3943 nand_dbg_print(NAND_DBG_WARN
,
3944 "NAND Program fail in %s, Line %d, "
3946 "new Bad Block %d generated!\n",
3947 __FILE__
, __LINE__
, __func__
,
3954 wBytesCopied
= FTL_Copy_Block_Table_To_Flash(tempBuf
,
3955 DeviceInfo
.wPageDataSize
, wBytesCopied
);
3956 memset(&tempBuf
[wBytesCopied
], 0xff,
3957 DeviceInfo
.wPageSize
-wBytesCopied
);
3958 FTL_Insert_Block_Table_Signature(
3959 &tempBuf
[DeviceInfo
.wPageDataSize
], bt_flag
);
3961 memcpy(g_pNextBlockTable
, tempBuf
,
3962 DeviceInfo
.wPageSize
* sizeof(u8
));
3963 nand_dbg_print(NAND_DBG_DEBUG
,
3964 "Writing the last Page of Block Table "
3965 "Block %u Page %u\n",
3966 (unsigned int)Block
, Page
+ bt_pages
- 1);
3967 if (FAIL
== GLOB_LLD_Write_Page_Main_Spare_cdma(
3968 g_pNextBlockTable
, Block
, Page
+ bt_pages
- 1, 1,
3969 LLD_CMD_FLAG_MODE_CDMA
|
3970 LLD_CMD_FLAG_ORDER_BEFORE_REST
)) {
3971 nand_dbg_print(NAND_DBG_WARN
,
3972 "NAND Program fail in %s, Line %d, "
3973 "Function: %s, new Bad Block %d generated!\n",
3974 __FILE__
, __LINE__
, __func__
, Block
);
3979 if (FAIL
== GLOB_LLD_Write_Page_Main_Spare(tempBuf
,
3980 Block
, Page
+bt_pages
- 1, 1)) {
3981 nand_dbg_print(NAND_DBG_WARN
,
3982 "NAND Program fail in %s, Line %d, "
3984 "new Bad Block %d generated!\n",
3985 __FILE__
, __LINE__
, __func__
, Block
);
3991 nand_dbg_print(NAND_DBG_DEBUG
, "FTL_Write_Block_Table_Data: done\n");
3997 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3998 * Function: FTL_Replace_Block_Table
4000 * Outputs: PASS=0 / FAIL=1
4001 * Description: Get a new block to write block table
4002 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4003 static u32
FTL_Replace_Block_Table(void)
4008 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
4009 __FILE__
, __LINE__
, __func__
);
4011 blk
= FTL_Replace_LWBlock(BLOCK_TABLE_INDEX
, &gc
);
4013 if ((BAD_BLOCK
== blk
) && (PASS
== gc
)) {
4014 GLOB_FTL_Garbage_Collection();
4015 blk
= FTL_Replace_LWBlock(BLOCK_TABLE_INDEX
, &gc
);
4017 if (BAD_BLOCK
== blk
)
4018 printk(KERN_ERR
"%s, %s: There is no spare block. "
4019 "It should never happen\n",
4020 __FILE__
, __func__
);
4022 nand_dbg_print(NAND_DBG_DEBUG
, "New Block table Block is %d\n", blk
);
4027 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4028 * Function: FTL_Replace_LWBlock
4029 * Inputs: Block number
4030 * Pointer to Garbage Collect flag
4032 * Description: Determine the least weared block by traversing
4034 * Set Garbage collection to be called if number of spare
4035 * block is less than Free Block Gate count
4036 * Change Block table entry to map least worn block for current
4038 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4039 static u32
FTL_Replace_LWBlock(u32 wBlockNum
, int *pGarbageCollect
)
4042 u32
*pbt
= (u32
*)g_pBlockTable
;
4043 u8 wLeastWornCounter
= 0xFF;
4044 u32 wLeastWornIndex
= BAD_BLOCK
;
4045 u32 wSpareBlockNum
= 0;
4046 u32 wDiscardBlockNum
= 0;
4048 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
4049 __FILE__
, __LINE__
, __func__
);
4051 if (IS_SPARE_BLOCK(wBlockNum
)) {
4052 *pGarbageCollect
= FAIL
;
4053 pbt
[wBlockNum
] = (u32
)(pbt
[wBlockNum
] & (~SPARE_BLOCK
));
4055 p_BTableChangesDelta
=
4056 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
4057 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
4058 p_BTableChangesDelta
->ftl_cmd_cnt
=
4060 p_BTableChangesDelta
->BT_Index
= (u32
)(wBlockNum
);
4061 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[wBlockNum
];
4062 p_BTableChangesDelta
->ValidFields
= 0x0C;
4064 return pbt
[wBlockNum
];
4067 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
4068 if (IS_DISCARDED_BLOCK(i
))
4071 if (IS_SPARE_BLOCK(i
)) {
4072 u32 wPhysicalIndex
= (u32
)((~BAD_BLOCK
) & pbt
[i
]);
4073 if (wPhysicalIndex
> DeviceInfo
.wSpectraEndBlock
)
4074 printk(KERN_ERR
"FTL_Replace_LWBlock: "
4075 "This should never occur!\n");
4076 if (g_pWearCounter
[wPhysicalIndex
-
4077 DeviceInfo
.wSpectraStartBlock
] <
4078 wLeastWornCounter
) {
4080 g_pWearCounter
[wPhysicalIndex
-
4081 DeviceInfo
.wSpectraStartBlock
];
4082 wLeastWornIndex
= i
;
4088 nand_dbg_print(NAND_DBG_WARN
,
4089 "FTL_Replace_LWBlock: Least Worn Counter %d\n",
4090 (int)wLeastWornCounter
);
4092 if ((wDiscardBlockNum
>= NUM_FREE_BLOCKS_GATE
) ||
4093 (wSpareBlockNum
<= NUM_FREE_BLOCKS_GATE
))
4094 *pGarbageCollect
= PASS
;
4096 *pGarbageCollect
= FAIL
;
4098 nand_dbg_print(NAND_DBG_DEBUG
,
4099 "FTL_Replace_LWBlock: Discarded Blocks %u Spare"
4101 (unsigned int)wDiscardBlockNum
,
4102 (unsigned int)wSpareBlockNum
);
4104 return FTL_Replace_OneBlock(wBlockNum
, wLeastWornIndex
);
4107 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4108 * Function: FTL_Replace_MWBlock
4110 * Outputs: most worn spare block no./BAD_BLOCK
4111 * Description: It finds most worn spare block.
4112 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4113 static u32
FTL_Replace_MWBlock(void)
4116 u32
*pbt
= (u32
*)g_pBlockTable
;
4117 u8 wMostWornCounter
= 0;
4118 u32 wMostWornIndex
= BAD_BLOCK
;
4119 u32 wSpareBlockNum
= 0;
4121 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
4122 __FILE__
, __LINE__
, __func__
);
4124 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
4125 if (IS_SPARE_BLOCK(i
)) {
4126 u32 wPhysicalIndex
= (u32
)((~SPARE_BLOCK
) & pbt
[i
]);
4127 if (g_pWearCounter
[wPhysicalIndex
-
4128 DeviceInfo
.wSpectraStartBlock
] >
4131 g_pWearCounter
[wPhysicalIndex
-
4132 DeviceInfo
.wSpectraStartBlock
];
4133 wMostWornIndex
= wPhysicalIndex
;
4139 if (wSpareBlockNum
<= 2)
4142 return wMostWornIndex
;
4145 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4146 * Function: FTL_Replace_Block
4147 * Inputs: Block Address
4148 * Outputs: PASS=0 / FAIL=1
4149 * Description: If block specified by blk_addr parameter is not free,
4150 * replace it with the least worn block.
4151 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4152 static int FTL_Replace_Block(u64 blk_addr
)
4154 u32 current_blk
= BLK_FROM_ADDR(blk_addr
);
4155 u32
*pbt
= (u32
*)g_pBlockTable
;
4157 int GarbageCollect
= FAIL
;
4159 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
4160 __FILE__
, __LINE__
, __func__
);
4162 if (IS_SPARE_BLOCK(current_blk
)) {
4163 pbt
[current_blk
] = (~SPARE_BLOCK
) & pbt
[current_blk
];
4165 p_BTableChangesDelta
=
4166 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
4167 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
4168 p_BTableChangesDelta
->ftl_cmd_cnt
=
4170 p_BTableChangesDelta
->BT_Index
= current_blk
;
4171 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[current_blk
];
4172 p_BTableChangesDelta
->ValidFields
= 0x0C ;
4177 FTL_Replace_LWBlock(current_blk
, &GarbageCollect
);
4179 if (PASS
== GarbageCollect
)
4180 wResult
= GLOB_FTL_Garbage_Collection();
4185 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4186 * Function: GLOB_FTL_Is_BadBlock
4187 * Inputs: block number to test
4188 * Outputs: PASS (block is BAD) / FAIL (block is not bad)
4189 * Description: test if this block number is flagged as bad
4190 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4191 int GLOB_FTL_Is_BadBlock(u32 wBlockNum
)
4193 u32
*pbt
= (u32
*)g_pBlockTable
;
4195 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
4196 __FILE__
, __LINE__
, __func__
);
4198 if (wBlockNum
>= DeviceInfo
.wSpectraStartBlock
4199 && BAD_BLOCK
== (pbt
[wBlockNum
] & BAD_BLOCK
))
4205 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4206 * Function: GLOB_FTL_Flush_Cache
4208 * Outputs: PASS=0 / FAIL=1
4209 * Description: flush all the cache blocks to flash
4210 * if a cache block is not dirty, don't do anything with it
4211 * else, write the block and update the block table
4212 * Note: This function should be called at shutdown/power down.
4213 * to write important data into device
4214 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4215 int GLOB_FTL_Flush_Cache(void)
4219 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
4220 __FILE__
, __LINE__
, __func__
);
4222 for (i
= 0; i
< CACHE_ITEM_NUM
; i
++) {
4223 if (SET
== Cache
.array
[i
].changed
) {
4225 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
4226 int_cache
[ftl_cmd_cnt
].item
= i
;
4227 int_cache
[ftl_cmd_cnt
].cache
.address
=
4228 Cache
.array
[i
].address
;
4229 int_cache
[ftl_cmd_cnt
].cache
.changed
= CLEAR
;
4232 ret
= write_back_to_l2_cache(Cache
.array
[i
].buf
, Cache
.array
[i
].address
);
4234 Cache
.array
[i
].changed
= CLEAR
;
4236 printk(KERN_ALERT
"Failed when write back to L2 cache!\n");
4237 /* TODO - How to handle this? */
4244 return FTL_Write_Block_Table(FAIL
);
4247 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4248 * Function: GLOB_FTL_Page_Read
4249 * Inputs: pointer to data
4250 * logical address of data (u64 is LBA * Bytes/Page)
4251 * Outputs: PASS=0 / FAIL=1
4252 * Description: reads a page of data into RAM from the cache
4253 * if the data is not already in cache, read from flash to cache
4254 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4255 int GLOB_FTL_Page_Read(u8
*data
, u64 logical_addr
)
4260 nand_dbg_print(NAND_DBG_DEBUG
, "GLOB_FTL_Page_Read - "
4261 "page_addr: %llu\n", logical_addr
);
4263 cache_item
= FTL_Cache_If_Hit(logical_addr
);
4265 if (UNHIT_CACHE_ITEM
== cache_item
) {
4266 nand_dbg_print(NAND_DBG_DEBUG
,
4267 "GLOB_FTL_Page_Read: Cache not hit\n");
4268 res
= FTL_Cache_Write();
4269 if (ERR
== FTL_Cache_Read(logical_addr
))
4271 cache_item
= Cache
.LRU
;
4274 FTL_Cache_Read_Page(data
, logical_addr
, cache_item
);
4279 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4280 * Function: GLOB_FTL_Page_Write
4281 * Inputs: pointer to data
4282 * address of data (ADDRESSTYPE is LBA * Bytes/Page)
4283 * Outputs: PASS=0 / FAIL=1
4284 * Description: writes a page of data from RAM to the cache
4285 * if the data is not already in cache, write back the
4286 * least recently used block and read the addressed block
4287 * from flash to cache
4288 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4289 int GLOB_FTL_Page_Write(u8
*pData
, u64 dwPageAddr
)
4292 u32
*pbt
= (u32
*)g_pBlockTable
;
4295 nand_dbg_print(NAND_DBG_TRACE
, "GLOB_FTL_Page_Write - "
4296 "dwPageAddr: %llu\n", dwPageAddr
);
4298 cache_blk
= FTL_Cache_If_Hit(dwPageAddr
);
4300 if (UNHIT_CACHE_ITEM
== cache_blk
) {
4301 wResult
= FTL_Cache_Write();
4302 if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr
))) {
4303 wResult
= FTL_Replace_Block(dwPageAddr
);
4304 pbt
[BLK_FROM_ADDR(dwPageAddr
)] |= SPARE_BLOCK
;
4305 if (wResult
== FAIL
)
4308 if (ERR
== FTL_Cache_Read(dwPageAddr
))
4310 cache_blk
= Cache
.LRU
;
4311 FTL_Cache_Write_Page(pData
, dwPageAddr
, cache_blk
, 0);
4314 FTL_Cache_Write_Page(pData
, dwPageAddr
, cache_blk
,
4315 LLD_CMD_FLAG_ORDER_BEFORE_REST
);
4317 FTL_Cache_Write_Page(pData
, dwPageAddr
, cache_blk
, 0);
4324 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4325 * Function: GLOB_FTL_Block_Erase
4326 * Inputs: address of block to erase (now in byte format, should change to
4328 * Outputs: PASS=0 / FAIL=1
4329 * Description: erases the specified block
4330 * increments the erase count
4331 * If erase count reaches its upper limit,call function to
4332 * do the ajustment as per the relative erase count values
4333 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4334 int GLOB_FTL_Block_Erase(u64 blk_addr
)
4339 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
4340 __FILE__
, __LINE__
, __func__
);
4342 BlkIdx
= (u32
)(blk_addr
>> DeviceInfo
.nBitsInBlockDataSize
);
4344 if (BlkIdx
< DeviceInfo
.wSpectraStartBlock
) {
4345 printk(KERN_ERR
"GLOB_FTL_Block_Erase: "
4346 "This should never occur\n");
4351 status
= GLOB_LLD_Erase_Block_cdma(BlkIdx
, LLD_CMD_FLAG_MODE_CDMA
);
4353 nand_dbg_print(NAND_DBG_WARN
,
4354 "NAND Program fail in %s, Line %d, "
4355 "Function: %s, new Bad Block %d generated!\n",
4356 __FILE__
, __LINE__
, __func__
, BlkIdx
);
4358 status
= GLOB_LLD_Erase_Block(BlkIdx
);
4359 if (status
== FAIL
) {
4360 nand_dbg_print(NAND_DBG_WARN
,
4361 "NAND Program fail in %s, Line %d, "
4362 "Function: %s, new Bad Block %d generated!\n",
4363 __FILE__
, __LINE__
, __func__
, BlkIdx
);
4368 if (DeviceInfo
.MLCDevice
) {
4369 g_pReadCounter
[BlkIdx
- DeviceInfo
.wSpectraStartBlock
] = 0;
4370 if (g_cBlockTableStatus
!= IN_PROGRESS_BLOCK_TABLE
) {
4371 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
4372 FTL_Write_IN_Progress_Block_Table_Page();
4376 g_pWearCounter
[BlkIdx
- DeviceInfo
.wSpectraStartBlock
]++;
4379 p_BTableChangesDelta
=
4380 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
4381 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
4382 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
4383 p_BTableChangesDelta
->WC_Index
=
4384 BlkIdx
- DeviceInfo
.wSpectraStartBlock
;
4385 p_BTableChangesDelta
->WC_Entry_Value
=
4386 g_pWearCounter
[BlkIdx
- DeviceInfo
.wSpectraStartBlock
];
4387 p_BTableChangesDelta
->ValidFields
= 0x30;
4389 if (DeviceInfo
.MLCDevice
) {
4390 p_BTableChangesDelta
=
4391 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
4392 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
4393 p_BTableChangesDelta
->ftl_cmd_cnt
=
4395 p_BTableChangesDelta
->RC_Index
=
4396 BlkIdx
- DeviceInfo
.wSpectraStartBlock
;
4397 p_BTableChangesDelta
->RC_Entry_Value
=
4398 g_pReadCounter
[BlkIdx
-
4399 DeviceInfo
.wSpectraStartBlock
];
4400 p_BTableChangesDelta
->ValidFields
= 0xC0;
4406 if (g_pWearCounter
[BlkIdx
- DeviceInfo
.wSpectraStartBlock
] == 0xFE)
4407 FTL_Adjust_Relative_Erase_Count(BlkIdx
);
4413 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4414 * Function: FTL_Adjust_Relative_Erase_Count
4415 * Inputs: index to block that was just incremented and is at the max
4416 * Outputs: PASS=0 / FAIL=1
4417 * Description: If any erase counts at MAX, adjusts erase count of every
4418 * block by substracting least worn
4419 * counter from counter value of every entry in wear table
4420 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4421 static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX
)
4423 u8 wLeastWornCounter
= MAX_BYTE_VALUE
;
4426 u32
*pbt
= (u32
*)g_pBlockTable
;
4429 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
4430 __FILE__
, __LINE__
, __func__
);
4432 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++) {
4433 if (IS_BAD_BLOCK(i
))
4435 wWearIndex
= (u32
)(pbt
[i
] & (~BAD_BLOCK
));
4437 if ((wWearIndex
- DeviceInfo
.wSpectraStartBlock
) < 0)
4438 printk(KERN_ERR
"FTL_Adjust_Relative_Erase_Count:"
4439 "This should never occur\n");
4440 wWearCounter
= g_pWearCounter
[wWearIndex
-
4441 DeviceInfo
.wSpectraStartBlock
];
4442 if (wWearCounter
< wLeastWornCounter
)
4443 wLeastWornCounter
= wWearCounter
;
4446 if (wLeastWornCounter
== 0) {
4447 nand_dbg_print(NAND_DBG_WARN
,
4448 "Adjusting Wear Levelling Counters: Special Case\n");
4449 g_pWearCounter
[Index_of_MAX
-
4450 DeviceInfo
.wSpectraStartBlock
]--;
4452 p_BTableChangesDelta
=
4453 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
4454 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
4455 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
4456 p_BTableChangesDelta
->WC_Index
=
4457 Index_of_MAX
- DeviceInfo
.wSpectraStartBlock
;
4458 p_BTableChangesDelta
->WC_Entry_Value
=
4459 g_pWearCounter
[Index_of_MAX
-
4460 DeviceInfo
.wSpectraStartBlock
];
4461 p_BTableChangesDelta
->ValidFields
= 0x30;
4463 FTL_Static_Wear_Leveling();
4465 for (i
= 0; i
< DeviceInfo
.wDataBlockNum
; i
++)
4466 if (!IS_BAD_BLOCK(i
)) {
4467 wWearIndex
= (u32
)(pbt
[i
] & (~BAD_BLOCK
));
4468 g_pWearCounter
[wWearIndex
-
4469 DeviceInfo
.wSpectraStartBlock
] =
4472 DeviceInfo
.wSpectraStartBlock
] -
4475 p_BTableChangesDelta
=
4476 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
4478 sizeof(struct BTableChangesDelta
);
4480 p_BTableChangesDelta
->ftl_cmd_cnt
=
4482 p_BTableChangesDelta
->WC_Index
= wWearIndex
-
4483 DeviceInfo
.wSpectraStartBlock
;
4484 p_BTableChangesDelta
->WC_Entry_Value
=
4485 g_pWearCounter
[wWearIndex
-
4486 DeviceInfo
.wSpectraStartBlock
];
4487 p_BTableChangesDelta
->ValidFields
= 0x30;
4495 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4496 * Function: FTL_Write_IN_Progress_Block_Table_Page
4499 * Description: It writes in-progress flag page to the page next to
4501 ***********************************************************************/
4502 static int FTL_Write_IN_Progress_Block_Table_Page(void)
4509 u32
*pbt
= (u32
*)g_pBlockTable
;
4510 u32 wTempBlockTableIndex
;
4513 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
4514 __FILE__
, __LINE__
, __func__
);
4516 bt_pages
= FTL_Get_Block_Table_Flash_Size_Pages();
4518 dwIPFPageAddr
= g_wBlockTableOffset
+ bt_pages
;
4520 nand_dbg_print(NAND_DBG_DEBUG
, "Writing IPF at "
4521 "Block %d Page %d\n",
4522 g_wBlockTableIndex
, dwIPFPageAddr
);
4525 wResult
= GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF
,
4526 g_wBlockTableIndex
, dwIPFPageAddr
, 1,
4527 LLD_CMD_FLAG_MODE_CDMA
| LLD_CMD_FLAG_ORDER_BEFORE_REST
);
4528 if (wResult
== FAIL
) {
4529 nand_dbg_print(NAND_DBG_WARN
,
4530 "NAND Program fail in %s, Line %d, "
4531 "Function: %s, new Bad Block %d generated!\n",
4532 __FILE__
, __LINE__
, __func__
,
4533 g_wBlockTableIndex
);
4535 g_wBlockTableOffset
= dwIPFPageAddr
+ 1;
4536 p_BTableChangesDelta
= (struct BTableChangesDelta
*)g_pBTDelta_Free
;
4537 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
4538 p_BTableChangesDelta
->ftl_cmd_cnt
= ftl_cmd_cnt
;
4539 p_BTableChangesDelta
->g_wBlockTableOffset
= g_wBlockTableOffset
;
4540 p_BTableChangesDelta
->ValidFields
= 0x01;
4543 wResult
= GLOB_LLD_Write_Page_Main_Spare(g_pIPF
,
4544 g_wBlockTableIndex
, dwIPFPageAddr
, 1);
4545 if (wResult
== FAIL
) {
4546 nand_dbg_print(NAND_DBG_WARN
,
4547 "NAND Program fail in %s, Line %d, "
4548 "Function: %s, new Bad Block %d generated!\n",
4549 __FILE__
, __LINE__
, __func__
,
4550 (int)g_wBlockTableIndex
);
4551 MARK_BLOCK_AS_BAD(pbt
[BLOCK_TABLE_INDEX
]);
4552 wTempBlockTableIndex
= FTL_Replace_Block_Table();
4553 bt_block_changed
= 1;
4554 if (BAD_BLOCK
== wTempBlockTableIndex
)
4556 g_wBlockTableIndex
= wTempBlockTableIndex
;
4557 g_wBlockTableOffset
= 0;
4558 /* Block table tag is '00'. Means it's used one */
4559 pbt
[BLOCK_TABLE_INDEX
] = g_wBlockTableIndex
;
4562 g_wBlockTableOffset
= dwIPFPageAddr
+ 1;
4567 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4568 * Function: FTL_Read_Disturbance
4569 * Inputs: block address
4570 * Outputs: PASS=0 / FAIL=1
4571 * Description: used to handle read disturbance. Data in block that
4572 * reaches its read limit is moved to new block
4573 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4574 int FTL_Read_Disturbance(u32 blk_addr
)
4577 u32
*pbt
= (u32
*) g_pBlockTable
;
4578 u32 dwOldBlockAddr
= blk_addr
;
4581 u32 wLeastReadCounter
= 0xFFFF;
4582 u32 wLeastReadIndex
= BAD_BLOCK
;
4583 u32 wSpareBlockNum
= 0;
4588 nand_dbg_print(NAND_DBG_DEBUG
, "%s, Line %d, Function: %s\n",
4589 __FILE__
, __LINE__
, __func__
);
4592 g_pTempBuf
= cp_back_buf_copies
[cp_back_buf_idx
];
4594 if (cp_back_buf_idx
> COPY_BACK_BUF_NUM
) {
4595 printk(KERN_ERR
"cp_back_buf_copies overflow! Exit."
4596 "Maybe too many pending commands in your CDMA chain.\n");
4600 g_pTempBuf
= tmp_buf_read_disturbance
;
4603 wBlockNum
= FTL_Get_Block_Index(blk_addr
);
4606 /* This is a bug.Here 'i' should be logical block number
4607 * and start from 1 (0 is reserved for block table).
4608 * Have fixed it. - Yunpeng 2008. 12. 19
4610 for (i
= 1; i
< DeviceInfo
.wDataBlockNum
; i
++) {
4611 if (IS_SPARE_BLOCK(i
)) {
4612 u32 wPhysicalIndex
=
4613 (u32
)((~SPARE_BLOCK
) & pbt
[i
]);
4614 if (g_pReadCounter
[wPhysicalIndex
-
4615 DeviceInfo
.wSpectraStartBlock
] <
4616 wLeastReadCounter
) {
4618 g_pReadCounter
[wPhysicalIndex
-
4619 DeviceInfo
.wSpectraStartBlock
];
4620 wLeastReadIndex
= i
;
4626 if (wSpareBlockNum
<= NUM_FREE_BLOCKS_GATE
) {
4627 wResult
= GLOB_FTL_Garbage_Collection();
4628 if (PASS
== wResult
)
4633 wTempNode
= (u32
)(DISCARD_BLOCK
| pbt
[wBlockNum
]);
4634 wReplacedNode
= (u32
)((~SPARE_BLOCK
) &
4635 pbt
[wLeastReadIndex
]);
4637 pbt
[wBlockNum
] = wReplacedNode
;
4638 pbt
[wLeastReadIndex
] = wTempNode
;
4639 p_BTableChangesDelta
=
4640 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
4641 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
4643 p_BTableChangesDelta
->ftl_cmd_cnt
=
4645 p_BTableChangesDelta
->BT_Index
= wBlockNum
;
4646 p_BTableChangesDelta
->BT_Entry_Value
= pbt
[wBlockNum
];
4647 p_BTableChangesDelta
->ValidFields
= 0x0C;
4649 p_BTableChangesDelta
=
4650 (struct BTableChangesDelta
*)g_pBTDelta_Free
;
4651 g_pBTDelta_Free
+= sizeof(struct BTableChangesDelta
);
4653 p_BTableChangesDelta
->ftl_cmd_cnt
=
4655 p_BTableChangesDelta
->BT_Index
= wLeastReadIndex
;
4656 p_BTableChangesDelta
->BT_Entry_Value
=
4657 pbt
[wLeastReadIndex
];
4658 p_BTableChangesDelta
->ValidFields
= 0x0C;
4660 wResult
= GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf
,
4661 dwOldBlockAddr
, 0, DeviceInfo
.wPagesPerBlock
,
4662 LLD_CMD_FLAG_MODE_CDMA
);
4663 if (wResult
== FAIL
)
4668 if (wResult
!= FAIL
) {
4669 if (FAIL
== GLOB_LLD_Write_Page_Main_cdma(
4670 g_pTempBuf
, pbt
[wBlockNum
], 0,
4671 DeviceInfo
.wPagesPerBlock
)) {
4672 nand_dbg_print(NAND_DBG_WARN
,
4673 "NAND Program fail in "
4674 "%s, Line %d, Function: %s, "
4677 __FILE__
, __LINE__
, __func__
,
4678 (int)pbt
[wBlockNum
]);
4680 MARK_BLOCK_AS_BAD(pbt
[wBlockNum
]);
4685 wResult
= GLOB_LLD_Read_Page_Main(g_pTempBuf
,
4686 dwOldBlockAddr
, 0, DeviceInfo
.wPagesPerBlock
);
4687 if (wResult
== FAIL
)
4690 if (wResult
!= FAIL
) {
4691 /* This is a bug. At this time, pbt[wBlockNum]
4692 is still the physical address of
4693 discard block, and should not be write.
4694 Have fixed it as below.
4695 -- Yunpeng 2008.12.19
4697 wResult
= GLOB_LLD_Write_Page_Main(g_pTempBuf
,
4699 DeviceInfo
.wPagesPerBlock
);
4700 if (wResult
== FAIL
) {
4701 nand_dbg_print(NAND_DBG_WARN
,
4702 "NAND Program fail in "
4703 "%s, Line %d, Function: %s, "
4706 __FILE__
, __LINE__
, __func__
,
4707 (int)wReplacedNode
);
4708 MARK_BLOCK_AS_BAD(wReplacedNode
);
4710 pbt
[wBlockNum
] = wReplacedNode
;
4711 pbt
[wLeastReadIndex
] = wTempNode
;
4715 if ((wResult
== PASS
) && (g_cBlockTableStatus
!=
4716 IN_PROGRESS_BLOCK_TABLE
)) {
4717 g_cBlockTableStatus
= IN_PROGRESS_BLOCK_TABLE
;
4718 FTL_Write_IN_Progress_Block_Table_Page();
4722 } while (wResult
!= PASS
)