Merge branch 'stable/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / staging / spectra / flash.c
1 /*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20 #include <linux/fs.h>
21 #include <linux/slab.h>
22
23 #include "flash.h"
24 #include "ffsdefs.h"
25 #include "lld.h"
26 #include "lld_nand.h"
27 #if CMD_DMA
28 #include "lld_cdma.h"
29 #endif
30
/* Convert a linear byte address into a block number, and into a page
 * number within that block (shift counts come from DeviceInfo) */
#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
	DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))

/* Block-state predicates.  NOTE: these expand against a local variable
 * named 'pbt' (the u32 block table) that must be in scope at the call
 * site.  A block is SPARE/DISCARDED only if it is not also BAD. */
#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
	BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))

#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))

#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
	BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))

#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
44
45 #if DEBUG_BNDRY
46 void debug_boundary_lineno_error(int chnl, int limit, int no,
47 int lineno, char *filename)
48 {
49 if (chnl >= limit)
50 printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
51 "at %s:%d. Other info:%d. Aborting...\n",
52 chnl, limit, filename, lineno, no);
53 }
54 /* static int globalmemsize; */
55 #endif
56
57 static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
58 static int FTL_Cache_Read(u64 dwPageAddr);
59 static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
60 u16 cache_blk);
61 static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
62 u8 cache_blk, u16 flag);
63 static int FTL_Cache_Write(void);
64 static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
65 static void FTL_Calculate_LRU(void);
66 static u32 FTL_Get_Block_Index(u32 wBlockNum);
67
68 static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
69 u8 BT_Tag, u16 *Page);
70 static int FTL_Read_Block_Table(void);
71 static int FTL_Write_Block_Table(int wForce);
72 static int FTL_Write_Block_Table_Data(void);
73 static int FTL_Check_Block_Table(int wOldTable);
74 static int FTL_Static_Wear_Leveling(void);
75 static u32 FTL_Replace_Block_Table(void);
76 static int FTL_Write_IN_Progress_Block_Table_Page(void);
77
78 static u32 FTL_Get_Page_Num(u64 length);
79 static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
80
81 static u32 FTL_Replace_OneBlock(u32 wBlockNum,
82 u32 wReplaceNum);
83 static u32 FTL_Replace_LWBlock(u32 wBlockNum,
84 int *pGarbageCollect);
85 static u32 FTL_Replace_MWBlock(void);
86 static int FTL_Replace_Block(u64 blk_addr);
87 static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
88
89 static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
90
/* Device geometry/capability info and the L1 page cache state */
struct device_info_tag DeviceInfo;
struct flash_cache_tag Cache;
static struct spectra_l2_cache_info cache_l2;

/* Scratch buffers used when flushing the Level2 cache */
static u8 *cache_l2_page_buf;
static u8 *cache_l2_blk_buf;

/* Block table storage.  g_pWearCounter and g_pReadCounter point INTO
 * the single g_pBlockTable allocation (see allocate_memory) */
u8 *g_pBlockTable;
u8 *g_pWearCounter;
u16 *g_pReadCounter;
u32 *g_pBTBlocks;
static u16 g_wBlockTableOffset;
static u32 g_wBlockTableIndex;
static u8 g_cBlockTableStatus;

/* Per-function scratch buffers, all allocated once in allocate_memory
 * and named after the function that uses them */
static u8 *g_pTempBuf;
static u8 *flag_check_blk_table;
static u8 *tmp_buf_search_bt_in_block;
static u8 *spare_buf_search_bt_in_block;
static u8 *spare_buf_bt_search_bt_in_block;
static u8 *tmp_buf1_read_blk_table;
static u8 *tmp_buf2_read_blk_table;
static u8 *flags_static_wear_leveling;
static u8 *tmp_buf_write_blk_table_data;
static u8 *tmp_buf_read_disturbance;

/* Buffers shared with lld_nand.c (hence non-static) */
u8 *buf_read_page_main_spare;
u8 *buf_write_page_main_spare;
u8 *buf_read_page_spare;
u8 *buf_get_bad_block;

#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
/* Shadow cache state used to roll back/forward after a CDMA chain
 * failure (see save_blk_table_changes / discard_cmds) */
struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
struct flash_cache_tag cache_start_copy;
#endif

int g_wNumFreeBlocks;
u8 g_SBDCmdIndex;

/* In-progress-flag page buffer and block-table tag bookkeeping */
static u8 *g_pIPF;
static u8 bt_flag = FIRST_BT_ID;
static u8 bt_block_changed;

static u16 cache_block_to_write;
static u8 last_erased = FIRST_BT_ID;

static u8 GC_Called;
static u8 BT_GC_Called;

#if CMD_DMA
#define COPY_BACK_BUF_NUM 10

static u8 ftl_cmd_cnt; /* Init value is 0 */
/* Copies of the block table and its counters kept for CDMA mode, plus
 * the queue of delta records describing pending table changes */
u8 *g_pBTDelta;
u8 *g_pBTDelta_Free;
u8 *g_pBTStartingCopy;
u8 *g_pWearCounterCopy;
u16 *g_pReadCounterCopy;
u8 *g_pBlockTableCopies;
u8 *g_pNextBlockTable;
static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
static int cp_back_buf_idx;

static u8 *g_temp_buf;

/* Packed on-the-wire record of one pending block-table change; the
 * ValidFields byte selects which members below are meaningful */
#pragma pack(push, 1)
#pragma pack(1)
struct BTableChangesDelta {
	u8 ftl_cmd_cnt;
	u8 ValidFields;
	u16 g_wBlockTableOffset;
	u32 g_wBlockTableIndex;
	u32 BT_Index;
	u32 BT_Entry_Value;
	u32 WC_Index;
	u8 WC_Entry_Value;
	u32 RC_Index;
	u16 RC_Entry_Value;
};

#pragma pack(pop)

struct BTableChangesDelta *p_BTableChangesDelta;
#endif
175
176
/* Flag a block-table entry as bad / discarded (argument is the entry lvalue) */
#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)

/* In-RAM sizes of the three block-table sections */
#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u32))
#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u8))
#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u16))
/* On-flash size of the LBA->PBA section: each u32 entry is packed into
 * 3 bytes for large block numbers, otherwise 2 bytes */
#if SUPPORT_LARGE_BLOCKNUM
#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u8) * 3)
#else
#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u16))
#endif
/* Counter sections are stored on flash in their in-memory size */
#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
	FTL_Get_WearCounter_Table_Mem_Size_Bytes
#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
	FTL_Get_ReadCounter_Table_Mem_Size_Bytes
197
198 static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
199 {
200 u32 byte_num;
201
202 if (DeviceInfo.MLCDevice) {
203 byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
204 DeviceInfo.wDataBlockNum * sizeof(u8) +
205 DeviceInfo.wDataBlockNum * sizeof(u16);
206 } else {
207 byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
208 DeviceInfo.wDataBlockNum * sizeof(u8);
209 }
210
211 byte_num += 4 * sizeof(u8);
212
213 return byte_num;
214 }
215
/* Same on-flash block-table size as above, expressed in flash pages */
static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
{
	return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
}
220
/*
 * Serialize a window of the in-RAM block table into flashBuf.  The
 * flash image is three consecutive sections: packed LBA->PBA entries,
 * wear counters and (MLC only) read counters.
 *
 * @flashBuf: destination buffer, at least sizeToTx bytes
 * @sizeToTx: number of bytes to produce in this call
 * @sizeTxed: bytes already produced by previous calls; lets a large
 *            table be written out one flash page at a time
 *
 * Returns the number of bytes placed in flashBuf.
 *
 * Packing: with SUPPORT_LARGE_BLOCKNUM each u32 entry becomes 3 bytes,
 * most-significant byte first (shift 16, then 8, then 0); otherwise 2
 * bytes.  Read counters are likewise stored high byte first.
 */
static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
			u32 sizeTxed)
{
	u32 wBytesCopied, blk_tbl_size, wBytes;
	u32 *pbt = (u32 *)g_pBlockTable;

	/* Section 1: LBA->PBA entries */
	blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
	for (wBytes = 0;
	(wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
	wBytes++) {
#if SUPPORT_LARGE_BLOCKNUM
		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
		>> (((wBytes + sizeTxed) % 3) ?
		((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
#else
		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
		>> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
#endif
	}

	/* Rebase the resume offset so it is relative to section 2 */
	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
	blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
	wBytesCopied = wBytes;
	/* Section 2: wear counters, raw copy of whatever fits */
	wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
		(sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
	memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);

	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;

	/* Section 3 (MLC only): u16 read counters, high byte first */
	if (DeviceInfo.MLCDevice) {
		blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
		wBytesCopied += wBytes;
		for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
			((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
			flashBuf[wBytes + wBytesCopied] =
				(g_pReadCounter[(wBytes + sizeTxed) / 2] >>
				(((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
	}

	return wBytesCopied + wBytes;
}
262
263 static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
264 u32 sizeToTx, u32 sizeTxed)
265 {
266 u32 wBytesCopied, blk_tbl_size, wBytes;
267 u32 *pbt = (u32 *)g_pBlockTable;
268
269 blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
270 for (wBytes = 0; (wBytes < sizeToTx) &&
271 ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
272 #if SUPPORT_LARGE_BLOCKNUM
273 if (!((wBytes + sizeTxed) % 3))
274 pbt[(wBytes + sizeTxed) / 3] = 0;
275 pbt[(wBytes + sizeTxed) / 3] |=
276 (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
277 ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
278 #else
279 if (!((wBytes + sizeTxed) % 2))
280 pbt[(wBytes + sizeTxed) / 2] = 0;
281 pbt[(wBytes + sizeTxed) / 2] |=
282 (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
283 0 : 8));
284 #endif
285 }
286
287 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
288 blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
289 wBytesCopied = wBytes;
290 wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
291 (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
292 memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
293 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
294
295 if (DeviceInfo.MLCDevice) {
296 wBytesCopied += wBytes;
297 blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
298 for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
299 ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
300 if (((wBytes + sizeTxed) % 2))
301 g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
302 g_pReadCounter[(wBytes + sizeTxed) / 2] |=
303 (flashBuf[wBytes] <<
304 (((wBytes + sizeTxed) % 2) ? 0 : 8));
305 }
306 }
307
308 return wBytesCopied+wBytes;
309 }
310
311 static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
312 {
313 int i;
314
315 for (i = 0; i < BTSIG_BYTES; i++)
316 buf[BTSIG_OFFSET + i] =
317 ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
318 (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
319
320 return PASS;
321 }
322
/*
 * Recover candidate block-table tags from the signature bytes in buf.
 * Pairs of signature bytes whose difference is a nonzero multiple of
 * BTSIG_DELTA are considered consistent; each consistent pair yields a
 * tag (deduplicated), up to BTSIG_BYTES/2 candidates.
 *
 * On return *tagarray points at the candidate list and the function
 * returns how many candidates were found.
 *
 * NOTE: the candidate list lives in a static buffer, so this function
 * is not reentrant and each call overwrites the previous result.
 */
static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
{
	static u8 tag[BTSIG_BYTES >> 1];
	int i, j, k, tagi, tagtemp, status;

	*tagarray = (u8 *)tag;
	tagi = 0;

	for (i = 0; i < (BTSIG_BYTES - 1); i++) {
		for (j = i + 1; (j < BTSIG_BYTES) &&
			(tagi < (BTSIG_BYTES >> 1)); j++) {
			tagtemp = buf[BTSIG_OFFSET + j] -
				buf[BTSIG_OFFSET + i];
			if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
				/* Undo the per-position delta to get the
				 * base tag, wrapped into the ID range */
				tagtemp = (buf[BTSIG_OFFSET + i] +
					(1 + LAST_BT_ID - FIRST_BT_ID) -
					(i * BTSIG_DELTA)) %
					(1 + LAST_BT_ID - FIRST_BT_ID);
				status = FAIL;
				for (k = 0; k < tagi; k++) {
					if (tagtemp == tag[k])
						status = PASS;
				}

				if (status == FAIL) {
					tag[tagi++] = tagtemp;
					/* CAUTION: order-dependent pair of
					 * updates - i may change before j's
					 * condition is evaluated; advances
					 * the scan only when j was the
					 * immediate neighbour of i */
					i = (j == (i + 1)) ? i + 1 : i;
					j = (j == (i + 1)) ? i + 1 : i;
				}
			}
		}
	}

	return tagi;
}
358
359
360 static int FTL_Execute_SPL_Recovery(void)
361 {
362 u32 j, block, blks;
363 u32 *pbt = (u32 *)g_pBlockTable;
364 int ret;
365
366 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
367 __FILE__, __LINE__, __func__);
368
369 blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
370 for (j = 0; j <= blks; j++) {
371 block = (pbt[j]);
372 if (((block & BAD_BLOCK) != BAD_BLOCK) &&
373 ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
374 ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
375 if (FAIL == ret) {
376 nand_dbg_print(NAND_DBG_WARN,
377 "NAND Program fail in %s, Line %d, "
378 "Function: %s, new Bad Block %d "
379 "generated!\n",
380 __FILE__, __LINE__, __func__,
381 (int)(block & ~BAD_BLOCK));
382 MARK_BLOCK_AS_BAD(pbt[j]);
383 }
384 }
385 }
386
387 return PASS;
388 }
389
390 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
391 * Function: GLOB_FTL_IdentifyDevice
392 * Inputs: pointer to identify data structure
393 * Outputs: PASS / FAIL
394 * Description: the identify data structure is filled in with
395 * information for the block driver.
396 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
397 int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
398 {
399 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
400 __FILE__, __LINE__, __func__);
401
402 dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
403 dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
404 dev_data->PageDataSize = DeviceInfo.wPageDataSize;
405 dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
406 dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
407
408 return PASS;
409 }
410
411 /* ..... */
/*
 * Allocate every buffer the FTL layer needs in one cascade of
 * allocations, mirrored by a reverse-order goto unwind chain on
 * failure.
 *
 * Returns PASS on success, -ENOMEM if any allocation fails (everything
 * allocated before the failing call is freed by the chain below).
 *
 * NOTE(review): g_pWearCounter and g_pReadCounter are NOT separate
 * allocations - they point into the single g_pBlockTable buffer, so
 * only g_pBlockTable itself is freed on the error path.
 */
static int allocate_memory(void)
{
	u32 block_table_size, page_size, block_size, mem_size;
	u32 total_bytes = 0;
	int i;
#if CMD_DMA
	int j;
#endif

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	page_size = DeviceInfo.wPageSize;
	block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;

	/* LBA->PBA entries + wear counters + read counters, rounded up
	 * to a whole number of data pages */
	block_table_size = DeviceInfo.wDataBlockNum *
		(sizeof(u32) + sizeof(u8) + sizeof(u16));
	block_table_size += (DeviceInfo.wPageDataSize -
		(block_table_size % DeviceInfo.wPageDataSize)) %
		DeviceInfo.wPageDataSize;

	/* Malloc memory for block tables */
	g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
	if (!g_pBlockTable)
		goto block_table_fail;
	memset(g_pBlockTable, 0, block_table_size);
	total_bytes += block_table_size;

	/* Counters live inside the block-table allocation */
	g_pWearCounter = (u8 *)(g_pBlockTable +
		DeviceInfo.wDataBlockNum * sizeof(u32));

	if (DeviceInfo.MLCDevice)
		g_pReadCounter = (u16 *)(g_pBlockTable +
			DeviceInfo.wDataBlockNum *
			(sizeof(u32) + sizeof(u8)));

	/* Malloc memory and init for cache items */
	for (i = 0; i < CACHE_ITEM_NUM; i++) {
		Cache.array[i].address = NAND_CACHE_INIT_ADDR;
		Cache.array[i].use_cnt = 0;
		Cache.array[i].changed = CLEAR;
		Cache.array[i].buf = kmalloc(Cache.cache_item_size,
			GFP_ATOMIC);
		if (!Cache.array[i].buf)
			goto cache_item_fail;
		memset(Cache.array[i].buf, 0, Cache.cache_item_size);
		total_bytes += Cache.cache_item_size;
	}

	/* Malloc memory for IPF */
	g_pIPF = kmalloc(page_size, GFP_ATOMIC);
	if (!g_pIPF)
		goto ipf_fail;
	memset(g_pIPF, 0, page_size);
	total_bytes += page_size;

	/* Malloc memory for data merging during Level2 Cache flush */
	cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
	if (!cache_l2_page_buf)
		goto cache_l2_page_buf_fail;
	memset(cache_l2_page_buf, 0xff, page_size);
	total_bytes += page_size;

	cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
	if (!cache_l2_blk_buf)
		goto cache_l2_blk_buf_fail;
	memset(cache_l2_blk_buf, 0xff, block_size);
	total_bytes += block_size;

	/* Malloc memory for temp buffer */
	g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
	if (!g_pTempBuf)
		goto Temp_buf_fail;
	memset(g_pTempBuf, 0, Cache.cache_item_size);
	total_bytes += Cache.cache_item_size;

	/* Malloc memory for block table blocks */
	mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
	g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBTBlocks)
		goto bt_blocks_fail;
	memset(g_pBTBlocks, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Check_Block_Table */
	flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
	if (!flag_check_blk_table)
		goto flag_check_blk_table_fail;
	total_bytes += DeviceInfo.wDataBlockNum;

	/* Malloc memory for function FTL_Search_Block_Table_IN_Block */
	tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf_search_bt_in_block)
		goto tmp_buf_search_bt_in_block_fail;
	memset(tmp_buf_search_bt_in_block, 0xff, page_size);
	total_bytes += page_size;

	/* Spare-area buffers: spare size = whole page - data area */
	mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
	spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
	if (!spare_buf_search_bt_in_block)
		goto spare_buf_search_bt_in_block_fail;
	memset(spare_buf_search_bt_in_block, 0xff, mem_size);
	total_bytes += mem_size;

	spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
	if (!spare_buf_bt_search_bt_in_block)
		goto spare_buf_bt_search_bt_in_block_fail;
	memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Read_Block_Table */
	tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf1_read_blk_table)
		goto tmp_buf1_read_blk_table_fail;
	memset(tmp_buf1_read_blk_table, 0xff, page_size);
	total_bytes += page_size;

	tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf2_read_blk_table)
		goto tmp_buf2_read_blk_table_fail;
	memset(tmp_buf2_read_blk_table, 0xff, page_size);
	total_bytes += page_size;

	/* Malloc memory for function FTL_Static_Wear_Leveling */
	flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
					GFP_ATOMIC);
	if (!flags_static_wear_leveling)
		goto flags_static_wear_leveling_fail;
	total_bytes += DeviceInfo.wDataBlockNum;

	/* Malloc memory for function FTL_Write_Block_Table_Data */
	if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
		mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
				2 * DeviceInfo.wPageSize;
	else
		mem_size = DeviceInfo.wPageSize;
	tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
	if (!tmp_buf_write_blk_table_data)
		goto tmp_buf_write_blk_table_data_fail;
	memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Read_Disturbance */
	tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
	if (!tmp_buf_read_disturbance)
		goto tmp_buf_read_disturbance_fail;
	memset(tmp_buf_read_disturbance, 0xff, block_size);
	total_bytes += block_size;

	/* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
	buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
	if (!buf_read_page_main_spare)
		goto buf_read_page_main_spare_fail;
	total_bytes += DeviceInfo.wPageSize;

	/* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
	buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
	if (!buf_write_page_main_spare)
		goto buf_write_page_main_spare_fail;
	total_bytes += DeviceInfo.wPageSize;

	/* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
	buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
	if (!buf_read_page_spare)
		goto buf_read_page_spare_fail;
	memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
	total_bytes += DeviceInfo.wPageSpareSize;

	/* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
	buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
	if (!buf_get_bad_block)
		goto buf_get_bad_block_fail;
	memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
	total_bytes += DeviceInfo.wPageSpareSize;

#if CMD_DMA
	g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
	if (!g_temp_buf)
		goto temp_buf_fail;
	memset(g_temp_buf, 0xff, block_size);
	total_bytes += block_size;

	/* Malloc memory for copy of block table used in CDMA mode */
	g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
	if (!g_pBTStartingCopy)
		goto bt_starting_copy;
	memset(g_pBTStartingCopy, 0, block_table_size);
	total_bytes += block_table_size;

	/* As above, the copy's counters point into g_pBTStartingCopy */
	g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
		DeviceInfo.wDataBlockNum * sizeof(u32));

	if (DeviceInfo.MLCDevice)
		g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
			DeviceInfo.wDataBlockNum *
			(sizeof(u32) + sizeof(u8)));

	/* Malloc memory for block table copies */
	mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
			5 * DeviceInfo.wDataBlockNum * sizeof(u8);
	if (DeviceInfo.MLCDevice)
		mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
	g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBlockTableCopies)
		goto blk_table_copies_fail;
	memset(g_pBlockTableCopies, 0, mem_size);
	total_bytes += mem_size;
	g_pNextBlockTable = g_pBlockTableCopies;

	/* Malloc memory for Block Table Delta */
	mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
	g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBTDelta)
		goto bt_delta_fail;
	memset(g_pBTDelta, 0, mem_size);
	total_bytes += mem_size;
	g_pBTDelta_Free = g_pBTDelta;

	/* Malloc memory for Copy Back Buffers */
	for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
		cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
		if (!cp_back_buf_copies[j])
			goto cp_back_buf_copies_fail;
		memset(cp_back_buf_copies[j], 0, block_size);
		total_bytes += block_size;
	}
	cp_back_buf_idx = 0;

	/* Malloc memory for pending commands list */
	mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
	info.pcmds = kzalloc(mem_size, GFP_KERNEL);
	if (!info.pcmds)
		goto pending_cmds_buf_fail;
	total_bytes += mem_size;

	/* Malloc memory for CDMA descripter table */
	mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
	info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!info.cdma_desc_buf)
		goto cdma_desc_buf_fail;
	total_bytes += mem_size;

	/* Malloc memory for Memcpy descripter table */
	mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
	info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!info.memcp_desc_buf)
		goto memcp_desc_buf_fail;
	total_bytes += mem_size;
#endif

	nand_dbg_print(NAND_DBG_WARN,
		"Total memory allocated in FTL layer: %d\n", total_bytes);

	return PASS;

	/* Error unwind: each label frees everything allocated before
	 * the failing call, in reverse allocation order */
#if CMD_DMA
memcp_desc_buf_fail:
	kfree(info.cdma_desc_buf);
cdma_desc_buf_fail:
	kfree(info.pcmds);
pending_cmds_buf_fail:
cp_back_buf_copies_fail:
	j--;
	for (; j >= 0; j--)
		kfree(cp_back_buf_copies[j]);
	kfree(g_pBTDelta);
bt_delta_fail:
	kfree(g_pBlockTableCopies);
blk_table_copies_fail:
	kfree(g_pBTStartingCopy);
bt_starting_copy:
	kfree(g_temp_buf);
temp_buf_fail:
	kfree(buf_get_bad_block);
#endif

buf_get_bad_block_fail:
	kfree(buf_read_page_spare);
buf_read_page_spare_fail:
	kfree(buf_write_page_main_spare);
buf_write_page_main_spare_fail:
	kfree(buf_read_page_main_spare);
buf_read_page_main_spare_fail:
	kfree(tmp_buf_read_disturbance);
tmp_buf_read_disturbance_fail:
	kfree(tmp_buf_write_blk_table_data);
tmp_buf_write_blk_table_data_fail:
	kfree(flags_static_wear_leveling);
flags_static_wear_leveling_fail:
	kfree(tmp_buf2_read_blk_table);
tmp_buf2_read_blk_table_fail:
	kfree(tmp_buf1_read_blk_table);
tmp_buf1_read_blk_table_fail:
	kfree(spare_buf_bt_search_bt_in_block);
spare_buf_bt_search_bt_in_block_fail:
	kfree(spare_buf_search_bt_in_block);
spare_buf_search_bt_in_block_fail:
	kfree(tmp_buf_search_bt_in_block);
tmp_buf_search_bt_in_block_fail:
	kfree(flag_check_blk_table);
flag_check_blk_table_fail:
	kfree(g_pBTBlocks);
bt_blocks_fail:
	kfree(g_pTempBuf);
Temp_buf_fail:
	kfree(cache_l2_blk_buf);
cache_l2_blk_buf_fail:
	kfree(cache_l2_page_buf);
cache_l2_page_buf_fail:
	kfree(g_pIPF);
ipf_fail:
cache_item_fail:
	i--;
	for (; i >= 0; i--)
		kfree(Cache.array[i].buf);
	kfree(g_pBlockTable);
block_table_fail:
	printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
		__FILE__, __LINE__);

	return -ENOMEM;
}
734
735 /* .... */
736 static int free_memory(void)
737 {
738 int i;
739
740 #if CMD_DMA
741 kfree(info.memcp_desc_buf);
742 kfree(info.cdma_desc_buf);
743 kfree(info.pcmds);
744 for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
745 kfree(cp_back_buf_copies[i]);
746 kfree(g_pBTDelta);
747 kfree(g_pBlockTableCopies);
748 kfree(g_pBTStartingCopy);
749 kfree(g_temp_buf);
750 kfree(buf_get_bad_block);
751 #endif
752 kfree(buf_read_page_spare);
753 kfree(buf_write_page_main_spare);
754 kfree(buf_read_page_main_spare);
755 kfree(tmp_buf_read_disturbance);
756 kfree(tmp_buf_write_blk_table_data);
757 kfree(flags_static_wear_leveling);
758 kfree(tmp_buf2_read_blk_table);
759 kfree(tmp_buf1_read_blk_table);
760 kfree(spare_buf_bt_search_bt_in_block);
761 kfree(spare_buf_search_bt_in_block);
762 kfree(tmp_buf_search_bt_in_block);
763 kfree(flag_check_blk_table);
764 kfree(g_pBTBlocks);
765 kfree(g_pTempBuf);
766 kfree(g_pIPF);
767 for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
768 kfree(Cache.array[i].buf);
769 kfree(g_pBlockTable);
770
771 return 0;
772 }
773
774 static void dump_cache_l2_table(void)
775 {
776 struct list_head *p;
777 struct spectra_l2_cache_list *pnd;
778 int n, i;
779
780 n = 0;
781 list_for_each(p, &cache_l2.table.list) {
782 pnd = list_entry(p, struct spectra_l2_cache_list, list);
783 nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
784 /*
785 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
786 if (pnd->pages_array[i] != MAX_U32_VALUE)
787 nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
788 }
789 */
790 n++;
791 }
792 }
793
794 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
795 * Function: GLOB_FTL_Init
796 * Inputs: none
797 * Outputs: PASS=0 / FAIL=1
798 * Description: allocates the memory for cache array,
799 * important data structures
800 * clears the cache array
801 * reads the block table from flash into array
802 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
803 int GLOB_FTL_Init(void)
804 {
805 int i;
806
807 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
808 __FILE__, __LINE__, __func__);
809
810 Cache.pages_per_item = 1;
811 Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
812
813 if (allocate_memory() != PASS)
814 return FAIL;
815
816 #if CMD_DMA
817 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
818 memcpy((void *)&cache_start_copy, (void *)&Cache,
819 sizeof(struct flash_cache_tag));
820 memset((void *)&int_cache, -1,
821 sizeof(struct flash_cache_delta_list_tag) *
822 (MAX_CHANS + MAX_DESCS));
823 #endif
824 ftl_cmd_cnt = 0;
825 #endif
826
827 if (FTL_Read_Block_Table() != PASS)
828 return FAIL;
829
830 /* Init the Level2 Cache data structure */
831 for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
832 cache_l2.blk_array[i] = MAX_U32_VALUE;
833 cache_l2.cur_blk_idx = 0;
834 cache_l2.cur_page_num = 0;
835 INIT_LIST_HEAD(&cache_l2.table.list);
836 cache_l2.table.logical_blk_num = MAX_U32_VALUE;
837
838 dump_cache_l2_table();
839
840 return 0;
841 }
842
843
844 #if CMD_DMA
845 #if 0
/*
 * Replay the queued block-table delta records of a successfully
 * completed command 'idx' into the CDMA master copy of the block table
 * (g_pBTStartingCopy / g_pWearCounterCopy / g_pReadCounterCopy), and
 * advance p_BTableChangesDelta past every record tagged for this
 * command.  ValidFields selects which delta members apply (0x01
 * offset, 0x03 offset+index, 0x0C table entry, 0x30 wear counter,
 * 0xC0 read counter).
 */
static void save_blk_table_changes(u16 idx)
{
	u8 ftl_cmd;
	u32 *pbt = (u32 *)g_pBTStartingCopy;

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u16 id;
	u8 cache_blks;

	/* Carry this descriptor's cache change into the shadow copy */
	id = idx - MAX_CHANS;
	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		cache_start_copy.array[cache_blks].address =
			int_cache[id].cache.address;
		cache_start_copy.array[cache_blks].changed =
			int_cache[id].cache.changed;
	}
#endif

	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;

	while (ftl_cmd <= PendingCMD[idx].Tag) {
		if (p_BTableChangesDelta->ValidFields == 0x01) {
			g_wBlockTableOffset =
				p_BTableChangesDelta->g_wBlockTableOffset;
		} else if (p_BTableChangesDelta->ValidFields == 0x0C) {
			pbt[p_BTableChangesDelta->BT_Index] =
				p_BTableChangesDelta->BT_Entry_Value;
			debug_boundary_error(((
				p_BTableChangesDelta->BT_Index)),
				DeviceInfo.wDataBlockNum, 0);
		} else if (p_BTableChangesDelta->ValidFields == 0x03) {
			g_wBlockTableOffset =
				p_BTableChangesDelta->g_wBlockTableOffset;
			g_wBlockTableIndex =
				p_BTableChangesDelta->g_wBlockTableIndex;
		} else if (p_BTableChangesDelta->ValidFields == 0x30) {
			g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
				p_BTableChangesDelta->WC_Entry_Value;
		} else if ((DeviceInfo.MLCDevice) &&
			(p_BTableChangesDelta->ValidFields == 0xC0)) {
			g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
				p_BTableChangesDelta->RC_Entry_Value;
			nand_dbg_print(NAND_DBG_DEBUG,
				"In event status setting read counter "
				"GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
				ftl_cmd,
				p_BTableChangesDelta->RC_Entry_Value,
				(unsigned int)p_BTableChangesDelta->RC_Index);
		} else {
			nand_dbg_print(NAND_DBG_DEBUG,
				"This should never occur \n");
		}
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}
}
903
/*
 * Discard the effects of command 'n' after an earlier command in the
 * CDMA chain failed: mark any data block it wrote as DISCARD in the
 * block-table copy, skip (without applying) its queued delta records,
 * and repair the shadow cache entry it touched.
 */
static void discard_cmds(u16 n)
{
	u32 *pbt = (u32 *)g_pBTStartingCopy;
	u8 ftl_cmd;
	unsigned long k;
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u8 cache_blks;
	u16 id;
#endif

	if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
		(PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
		for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
			if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
				MARK_BLK_AS_DISCARD(pbt[k]);
		}
	}

	/* Skip this command's delta records without applying them */
	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	while (ftl_cmd <= PendingCMD[n].Tag) {
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	id = n - MAX_CHANS;

	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		if (PendingCMD[n].CMD == MEMCOPY_CMD) {
			/* Invalidate the entry only when the memcopy's
			 * destination fell inside this entry's buffer */
			if ((cache_start_copy.array[cache_blks].buf <=
				PendingCMD[n].DataDestAddr) &&
				((cache_start_copy.array[cache_blks].buf +
				Cache.cache_item_size) >
				PendingCMD[n].DataDestAddr)) {
				cache_start_copy.array[cache_blks].address =
						NAND_CACHE_INIT_ADDR;
				cache_start_copy.array[cache_blks].use_cnt =
						0;
				cache_start_copy.array[cache_blks].changed =
						CLEAR;
			}
		} else {
			/* Otherwise restore the entry's pre-command state */
			cache_start_copy.array[cache_blks].address =
				int_cache[id].cache.address;
			cache_start_copy.array[cache_blks].changed =
				int_cache[id].cache.changed;
		}
	}
#endif
}
955
956 static void process_cmd_pass(int *first_failed_cmd, u16 idx)
957 {
958 if (0 == *first_failed_cmd)
959 save_blk_table_changes(idx);
960 else
961 discard_cmds(idx);
962 }
963
/*
 * Handle a failed or aborted command: record the first failing
 * command's SBD index for the caller, skip the command's queued delta
 * records, restore or invalidate the shadow cache entry it touched,
 * and on an erase/program failure mark the involved block BAD in the
 * block-table copy.
 */
static void process_cmd_fail_abort(int *first_failed_cmd,
				u16 idx, int event)
{
	u32 *pbt = (u32 *)g_pBTStartingCopy;
	u8 ftl_cmd;
	unsigned long i;
	int erase_fail, program_fail;
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u8 cache_blks;
	u16 id;
#endif

	/* Only the first failure in the chain is reported */
	if (0 == *first_failed_cmd)
		*first_failed_cmd = PendingCMD[idx].SBDCmdIndex;

	nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occured "
		"while executing %u Command %u accesing Block %u\n",
		(unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
		PendingCMD[idx].CMD,
		(unsigned int)PendingCMD[idx].Block);

	/* Skip this command's delta records without applying them */
	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	while (ftl_cmd <= PendingCMD[idx].Tag) {
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	id = idx - MAX_CHANS;

	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
			/* Failed write: keep the entry but flag it dirty */
			cache_start_copy.array[cache_blks].address =
					int_cache[id].cache.address;
			cache_start_copy.array[cache_blks].changed = SET;
		} else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
			/* Failed read: invalidate the entry */
			cache_start_copy.array[cache_blks].address =
				NAND_CACHE_INIT_ADDR;
			cache_start_copy.array[cache_blks].use_cnt = 0;
			cache_start_copy.array[cache_blks].changed =
							CLEAR;
		} else if (PendingCMD[idx].CMD == ERASE_CMD) {
			/* ? */
		} else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
			/* ? */
		}
	}
#endif

	erase_fail = (event == EVENT_ERASE_FAILURE) &&
			(PendingCMD[idx].CMD == ERASE_CMD);

	program_fail = (event == EVENT_PROGRAM_FAILURE) &&
			((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
			(PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));

	if (erase_fail || program_fail) {
		/* Mark every table entry mapping this physical block BAD */
		for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
			if (PendingCMD[idx].Block ==
				(pbt[i] & (~BAD_BLOCK)))
				MARK_BLOCK_AS_BAD(pbt[i]);
		}
	}
}
1029
1030 static void process_cmd(int *first_failed_cmd, u16 idx, int event)
1031 {
1032 u8 ftl_cmd;
1033 int cmd_match = 0;
1034
1035 if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
1036 cmd_match = 1;
1037
1038 if (PendingCMD[idx].Status == CMD_PASS) {
1039 process_cmd_pass(first_failed_cmd, idx);
1040 } else if ((PendingCMD[idx].Status == CMD_FAIL) ||
1041 (PendingCMD[idx].Status == CMD_ABORT)) {
1042 process_cmd_fail_abort(first_failed_cmd, idx, event);
1043 } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
1044 PendingCMD[idx].Tag) {
1045 nand_dbg_print(NAND_DBG_DEBUG,
1046 " Command no. %hu is not executed\n",
1047 (unsigned int)PendingCMD[idx].Tag);
1048 ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
1049 while (ftl_cmd <= PendingCMD[idx].Tag) {
1050 p_BTableChangesDelta += 1;
1051 ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
1052 }
1053 }
1054 }
1055 #endif
1056
1057 static void process_cmd(int *first_failed_cmd, u16 idx, int event)
1058 {
1059 printk(KERN_ERR "temporary workaround function. "
1060 "Should not be called! \n");
1061 }
1062
1063 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1064 * Function: GLOB_FTL_Event_Status
1065 * Inputs: none
1066 * Outputs: Event Code
1067 * Description: It is called by SBD after hardware interrupt signalling
1068 * completion of commands chain
1069 * It does following things
1070 * get event status from LLD
1071 * analyze command chain status
1072 * determine last command executed
1073 * analyze results
1074 * rebuild the block table in case of uncorrectable error
1075 * return event code
1076 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Event_Status(int *first_failed_cmd)
{
	int event_code = PASS;
	u16 i_P;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	*first_failed_cmd = 0;

	/* Ask the LLD what happened to the last command chain */
	event_code = GLOB_LLD_Event_Status();

	switch (event_code) {
	case EVENT_PASS:
		nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
		break;
	case EVENT_UNCORRECTABLE_DATA_ERROR:
		nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
		break;
	case EVENT_PROGRAM_FAILURE:
	case EVENT_ERASE_FAILURE:
		nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
			"Event code: 0x%x\n", event_code);
		/* Walk every pending command's delta entries, commit or
		 * discard them, then roll the live block table, wear and
		 * read counters back from the pre-chain copies */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta;
		for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
				i_P++)
			process_cmd(first_failed_cmd, i_P, event_code);
		memcpy(g_pBlockTable, g_pBTStartingCopy,
			DeviceInfo.wDataBlockNum * sizeof(u32));
		memcpy(g_pWearCounter, g_pWearCounterCopy,
			DeviceInfo.wDataBlockNum * sizeof(u8));
		if (DeviceInfo.MLCDevice)
			memcpy(g_pReadCounter, g_pReadCounterCopy,
				DeviceInfo.wDataBlockNum * sizeof(u16));

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
		/* Restore the data cache from its pre-chain snapshot */
		memcpy((void *)&Cache, (void *)&cache_start_copy,
			sizeof(struct flash_cache_tag));
		memset((void *)&int_cache, -1,
			sizeof(struct flash_cache_delta_list_tag) *
			(MAX_DESCS + MAX_CHANS));
#endif
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Handling unexpected event code - 0x%x\n",
			event_code);
		event_code = ERR;
		break;
	}

	/* Re-snapshot the (possibly restored) tables as the baseline
	 * for the next command chain */
	memcpy(g_pBTStartingCopy, g_pBlockTable,
		DeviceInfo.wDataBlockNum * sizeof(u32));
	memcpy(g_pWearCounterCopy, g_pWearCounter,
		DeviceInfo.wDataBlockNum * sizeof(u8));
	if (DeviceInfo.MLCDevice)
		memcpy(g_pReadCounterCopy, g_pReadCounter,
			DeviceInfo.wDataBlockNum * sizeof(u16));

	/* Reset delta area and per-chain bookkeeping */
	g_pBTDelta_Free = g_pBTDelta;
	ftl_cmd_cnt = 0;
	g_pNextBlockTable = g_pBlockTableCopies;
	cp_back_buf_idx = 0;

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	memcpy((void *)&cache_start_copy, (void *)&Cache,
		sizeof(struct flash_cache_tag));
	memset((void *)&int_cache, -1,
		sizeof(struct flash_cache_delta_list_tag) *
		(MAX_DESCS + MAX_CHANS));
#endif

	return event_code;
}
1152
1153 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1154 * Function: glob_ftl_execute_cmds
1155 * Inputs: none
1156 * Outputs: none
1157 * Description: pass thru to LLD
1158 ***************************************************************/
1159 u16 glob_ftl_execute_cmds(void)
1160 {
1161 nand_dbg_print(NAND_DBG_TRACE,
1162 "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
1163 (unsigned int)ftl_cmd_cnt);
1164 g_SBDCmdIndex = 0;
1165 return glob_lld_execute_cmds();
1166 }
1167
1168 #endif
1169
1170 #if !CMD_DMA
1171 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function: GLOB_FTL_Read_Immediate
1173 * Inputs: pointer to data
1174 * address of data
1175 * Outputs: PASS / FAIL
1176 * Description: Reads one page of data into RAM directly from flash without
1177 * using or disturbing cache.It is assumed this function is called
1178 * with CMD-DMA disabled.
1179 *****************************************************************/
int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
{
	int wResult = FAIL;
	u32 Block;
	u16 Page;
	u32 phy_blk;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Split the logical byte address into block and page */
	Block = BLK_FROM_ADDR(addr);
	Page = PAGE_FROM_ADDR(addr, Block);

	/* Only spare blocks may be read this way */
	if (!IS_SPARE_BLOCK(Block))
		return FAIL;

	phy_blk = pbt[Block];
	wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);

	if (DeviceInfo.MLCDevice) {
		/* Count reads on MLC parts; trigger read-disturb
		 * handling once the per-block threshold is reached */
		g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
		if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
			>= MAX_READ_COUNTER)
			FTL_Read_Disturbance(phy_blk);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}

	return wResult;
}
1213 #endif
1214
1215 #ifdef SUPPORT_BIG_ENDIAN
1216 /*********************************************************************
1217 * Function: FTL_Invert_Block_Table
1218 * Inputs: none
1219 * Outputs: none
1220 * Description: Re-format the block table in ram based on BIG_ENDIAN and
1221 * LARGE_BLOCKNUM if necessary
1222 **********************************************************************/
static void FTL_Invert_Block_Table(void)
{
	u32 i;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

#ifdef SUPPORT_LARGE_BLOCKNUM
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		pbt[i] = INVERTUINT32(pbt[i]);
		/* NOTE(review): elsewhere in this file the wear counter
		 * array is copied with element size sizeof(u8); applying
		 * a 32-bit (or 16-bit below) byte swap to its entries
		 * looks suspicious — confirm intended element width. */
		g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
	}
#else
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		pbt[i] = INVERTUINT16(pbt[i]);
		g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
	}
#endif
}
1243 #endif
1244
1245 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1246 * Function: GLOB_FTL_Flash_Init
1247 * Inputs: none
1248 * Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1249 * Description: The flash controller is initialized
1250 * The flash device is reset
1251 * Perform a flash READ ID command to confirm that a
1252 * valid device is attached and active.
1253 * The DeviceInfo structure gets filled in
1254 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1255 int GLOB_FTL_Flash_Init(void)
1256 {
1257 int status = FAIL;
1258
1259 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1260 __FILE__, __LINE__, __func__);
1261
1262 g_SBDCmdIndex = 0;
1263
1264 GLOB_LLD_Flash_Init();
1265
1266 status = GLOB_LLD_Read_Device_ID();
1267
1268 return status;
1269 }
1270
1271 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1272 * Inputs: none
1273 * Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1274 * Description: The flash controller is released
1275 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1276 int GLOB_FTL_Flash_Release(void)
1277 {
1278 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1279 __FILE__, __LINE__, __func__);
1280
1281 return GLOB_LLD_Flash_Release();
1282 }
1283
1284
1285 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1286 * Function: GLOB_FTL_Cache_Release
1287 * Inputs: none
1288 * Outputs: none
1289 * Description: release all allocated memory in GLOB_FTL_Init
1290 * (allocated in GLOB_FTL_Init)
1291 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1292 void GLOB_FTL_Cache_Release(void)
1293 {
1294 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1295 __FILE__, __LINE__, __func__);
1296
1297 free_memory();
1298 }
1299
1300 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1301 * Function: FTL_Cache_If_Hit
1302 * Inputs: Page Address
1303 * Outputs: Block number/UNHIT BLOCK
1304 * Description: Determines if the addressed page is in cache
1305 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1306 static u16 FTL_Cache_If_Hit(u64 page_addr)
1307 {
1308 u16 item;
1309 u64 addr;
1310 int i;
1311
1312 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1313 __FILE__, __LINE__, __func__);
1314
1315 item = UNHIT_CACHE_ITEM;
1316 for (i = 0; i < CACHE_ITEM_NUM; i++) {
1317 addr = Cache.array[i].address;
1318 if ((page_addr >= addr) &&
1319 (page_addr < (addr + Cache.cache_item_size))) {
1320 item = i;
1321 break;
1322 }
1323 }
1324
1325 return item;
1326 }
1327
1328 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1329 * Function: FTL_Calculate_LRU
1330 * Inputs: None
1331 * Outputs: None
1332 * Description: Calculate the least recently block in a cache and record its
1333 * index in LRU field.
1334 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1335 static void FTL_Calculate_LRU(void)
1336 {
1337 u16 i, bCurrentLRU, bTempCount;
1338
1339 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1340 __FILE__, __LINE__, __func__);
1341
1342 bCurrentLRU = 0;
1343 bTempCount = MAX_WORD_VALUE;
1344
1345 for (i = 0; i < CACHE_ITEM_NUM; i++) {
1346 if (Cache.array[i].use_cnt < bTempCount) {
1347 bCurrentLRU = i;
1348 bTempCount = Cache.array[i].use_cnt;
1349 }
1350 }
1351
1352 Cache.LRU = bCurrentLRU;
1353 }
1354
1355 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1356 * Function: FTL_Cache_Read_Page
1357 * Inputs: pointer to read buffer, logical address and cache item number
1358 * Outputs: None
1359 * Description: Read the page from the cached block addressed by blocknumber
1360 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1361 static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
1362 {
1363 u8 *start_addr;
1364
1365 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1366 __FILE__, __LINE__, __func__);
1367
1368 start_addr = Cache.array[cache_item].buf;
1369 start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
1370 DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
1371
1372 #if CMD_DMA
1373 GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
1374 DeviceInfo.wPageDataSize, 0);
1375 ftl_cmd_cnt++;
1376 #else
1377 memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
1378 #endif
1379
1380 if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
1381 Cache.array[cache_item].use_cnt++;
1382 }
1383
1384 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1385 * Function: FTL_Cache_Read_All
1386 * Inputs: pointer to read buffer,block address
1387 * Outputs: PASS=0 / FAIL =1
1388 * Description: It reads pages in cache
1389 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
{
	int wResult = PASS;
	u32 Block;
	u32 lba;
	u16 Page;
	u16 PageCount;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 i;

	Block = BLK_FROM_ADDR(phy_addr);
	Page = PAGE_FROM_ADDR(phy_addr, Block);
	PageCount = Cache.pages_per_item;

	nand_dbg_print(NAND_DBG_DEBUG,
			"%s, Line %d, Function: %s, Block: 0x%x\n",
			__FILE__, __LINE__, __func__, Block);

	/* Reverse-map the physical block to its logical index; if the
	 * block is spare/bad/discarded there is no valid data on it,
	 * so hand back an all-0xFF buffer instead of reading flash */
	lba = 0xffffffff;
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if ((pbt[i] & (~BAD_BLOCK)) == Block) {
			lba = i;
			if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
				IS_DISCARDED_BLOCK(i)) {
				/* Add by yunpeng -2008.12.3 */
#if CMD_DMA
				GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
				PageCount * DeviceInfo.wPageDataSize, 0);
				ftl_cmd_cnt++;
#else
				memset(pData, 0xFF,
					PageCount * DeviceInfo.wPageDataSize);
#endif
				return wResult;
			} else {
				continue; /* break ?? */
			}
		}
	}

	if (0xffffffff == lba)
		printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");

#if CMD_DMA
	wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
			PageCount, LLD_CMD_FLAG_MODE_CDMA);
	if (DeviceInfo.MLCDevice) {
		/* Count the read and log a delta entry so the read
		 * counter change can be replayed/rolled back if the
		 * CDMA chain fails */
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		nand_dbg_print(NAND_DBG_DEBUG,
			"Read Counter modified in ftl_cmd_cnt %u"
			" Block %u Counter%u\n",
			ftl_cmd_cnt, (unsigned int)Block,
			g_pReadCounter[Block -
			DeviceInfo.wSpectraStartBlock]);

		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->RC_Index =
			Block - DeviceInfo.wSpectraStartBlock;
		p_BTableChangesDelta->RC_Entry_Value =
			g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
		p_BTableChangesDelta->ValidFields = 0xC0;

		ftl_cmd_cnt++;

		/* Read-disturb handling once the threshold is hit */
		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
			MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	} else {
		ftl_cmd_cnt++;
	}
#else
	wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
	if (wResult == FAIL)
		return wResult;

	if (DeviceInfo.MLCDevice) {
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
			MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}
#endif
	return wResult;
}
1485
1486 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1487 * Function: FTL_Cache_Write_All
1488 * Inputs: pointer to cache in sys memory
1489 * address of free block in flash
1490 * Outputs: PASS=0 / FAIL=1
1491 * Description: writes all the pages of the block in cache to flash
1492 *
1493 * NOTE:need to make sure this works ok when cache is limited
1494 * to a partial block. This is where copy-back would be
1495 * activated. This would require knowing which pages in the
1496 * cached block are clean/dirty.Right now we only know if
1497 * the whole block is clean/dirty.
1498 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
{
	u16 wResult = PASS;
	u32 Block;
	u16 Page;
	u16 PageCount;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
		"on %d\n", cache_block_to_write,
		(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));

	/* Resolve the destination block/page; one cache item spans
	 * Cache.pages_per_item flash pages */
	Block = BLK_FROM_ADDR(blk_addr);
	Page = PAGE_FROM_ADDR(blk_addr, Block);
	PageCount = Cache.pages_per_item;

#if CMD_DMA
	/* Queue the program via CDMA; failure means a new bad block
	 * that the caller must replace */
	if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
			Block, Page, PageCount)) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated! "
			"Need Bad Block replacing.\n",
			__FILE__, __LINE__, __func__, Block);
		wResult = FAIL;
	}
	ftl_cmd_cnt++;
#else
	/* Synchronous program in the non-CDMA build */
	if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
			" Line %d, Function %s, new Bad Block %d generated!"
			"Need Bad Block replacing.\n",
			__FILE__, __LINE__, __func__, Block);
		wResult = FAIL;
	}
#endif
	return wResult;
}
1539
1540 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1541 * Function: FTL_Cache_Update_Block
1542 * Inputs: pointer to buffer,page address,block address
1543 * Outputs: PASS=0 / FAIL=1
1544 * Description: It updates the cache
1545 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Update_Block(u8 *pData,
			u64 old_page_addr, u64 blk_addr)
{
	int i, j;
	u8 *buf = pData;
	int wResult = PASS;
	int wFoundInCache;
	u64 page_addr;
	u64 addr;
	u64 old_blk_addr;
	u16 page_offset;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Start address of the old block and the cache-item slot that
	 * old_page_addr occupies within it */
	old_blk_addr = (u64)(old_page_addr >>
		DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
	page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
		DeviceInfo.nBitsInPageDataSize);

	/* Rebuild the block at blk_addr one cache item at a time:
	 * the updated slot comes from pData, every other slot comes
	 * from the cache if present, otherwise from flash */
	for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
		page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
		if (i != page_offset) {
			wFoundInCache = FAIL;
			for (j = 0; j < CACHE_ITEM_NUM; j++) {
				/* Compare against the item's physical
				 * address, since page_addr is physical */
				addr = Cache.array[j].address;
				addr = FTL_Get_Physical_Block_Addr(addr) +
					GLOB_u64_Remainder(addr, 2);
				if ((addr >= page_addr) && addr <
					(page_addr + Cache.cache_item_size)) {
					wFoundInCache = PASS;
					buf = Cache.array[j].buf;
					Cache.array[j].changed = SET;
#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
					/* Snapshot the slot state so it
					 * can be restored on chain fail */
					int_cache[ftl_cmd_cnt].item = j;
					int_cache[ftl_cmd_cnt].cache.address =
						Cache.array[j].address;
					int_cache[ftl_cmd_cnt].cache.changed =
						Cache.array[j].changed;
#endif
#endif
					break;
				}
			}
			if (FAIL == wFoundInCache) {
				/* Not cached: fetch from flash */
				if (ERR == FTL_Cache_Read_All(g_pTempBuf,
					page_addr)) {
					wResult = FAIL;
					break;
				}
				buf = g_pTempBuf;
			}
		} else {
			/* This is the slot being updated */
			buf = pData;
		}

		if (FAIL == FTL_Cache_Write_All(buf,
			blk_addr + (page_addr - old_blk_addr))) {
			wResult = FAIL;
			break;
		}
	}

	return wResult;
}
1612
1613 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1614 * Function: FTL_Copy_Block
1615 * Inputs: source block address
1616 * Destination block address
1617 * Outputs: PASS=0 / FAIL=1
1618 * Description: used only for static wear leveling to move the block
1619 * containing static data to new blocks(more worn)
1620 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1621 int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
1622 {
1623 int i, r1, r2, wResult = PASS;
1624
1625 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1626 __FILE__, __LINE__, __func__);
1627
1628 for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
1629 r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
1630 i * DeviceInfo.wPageDataSize);
1631 r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
1632 i * DeviceInfo.wPageDataSize);
1633 if ((ERR == r1) || (FAIL == r2)) {
1634 wResult = FAIL;
1635 break;
1636 }
1637 }
1638
1639 return wResult;
1640 }
1641
1642 /* Search the block table to find out the least wear block and then return it */
1643 static u32 find_least_worn_blk_for_l2_cache(void)
1644 {
1645 int i;
1646 u32 *pbt = (u32 *)g_pBlockTable;
1647 u8 least_wear_cnt = MAX_BYTE_VALUE;
1648 u32 least_wear_blk_idx = MAX_U32_VALUE;
1649 u32 phy_idx;
1650
1651 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1652 if (IS_SPARE_BLOCK(i)) {
1653 phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
1654 if (phy_idx > DeviceInfo.wSpectraEndBlock)
1655 printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
1656 "Too big phy block num (%d)\n", phy_idx);
1657 if (g_pWearCounter[phy_idx -DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
1658 least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
1659 least_wear_blk_idx = i;
1660 }
1661 }
1662 }
1663
1664 nand_dbg_print(NAND_DBG_WARN,
1665 "find_least_worn_blk_for_l2_cache: "
1666 "find block %d with least worn counter (%d)\n",
1667 least_wear_blk_idx, least_wear_cnt);
1668
1669 return least_wear_blk_idx;
1670 }
1671
1672
1673
1674 /* Get blocks for Level2 Cache */
1675 static int get_l2_cache_blks(void)
1676 {
1677 int n;
1678 u32 blk;
1679 u32 *pbt = (u32 *)g_pBlockTable;
1680
1681 for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
1682 blk = find_least_worn_blk_for_l2_cache();
1683 if (blk > DeviceInfo.wDataBlockNum) {
1684 nand_dbg_print(NAND_DBG_WARN,
1685 "find_least_worn_blk_for_l2_cache: "
1686 "No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
1687 return FAIL;
1688 }
1689 /* Tag the free block as discard in block table */
1690 pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
1691 /* Add the free block to the L2 Cache block array */
1692 cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
1693 }
1694
1695 return PASS;
1696 }
1697
1698 static int erase_l2_cache_blocks(void)
1699 {
1700 int i, ret = PASS;
1701 u32 pblk, lblk;
1702 u64 addr;
1703 u32 *pbt = (u32 *)g_pBlockTable;
1704
1705 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1706 __FILE__, __LINE__, __func__);
1707
1708 for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
1709 pblk = cache_l2.blk_array[i];
1710
1711 /* If the L2 cache block is invalid, then just skip it */
1712 if (MAX_U32_VALUE == pblk)
1713 continue;
1714
1715 BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
1716
1717 addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
1718 if (PASS == GLOB_FTL_Block_Erase(addr)) {
1719 /* Get logical block number of the erased block */
1720 lblk = FTL_Get_Block_Index(pblk);
1721 BUG_ON(BAD_BLOCK == lblk);
1722 /* Tag it as free in the block table */
1723 pbt[lblk] &= (u32)(~DISCARD_BLOCK);
1724 pbt[lblk] |= (u32)(SPARE_BLOCK);
1725 } else {
1726 MARK_BLOCK_AS_BAD(pbt[lblk]);
1727 ret = ERR;
1728 }
1729 }
1730
1731 return ret;
1732 }
1733
1734 /*
1735 * Merge the valid data page in the L2 cache blocks into NAND.
1736 */
static int flush_l2_cache(void)
{
	struct list_head *p;
	struct spectra_l2_cache_list *pnd, *tmp_pnd;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 phy_blk, l2_blk;
	u64 addr;
	u16 l2_page;
	int i, ret = PASS;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (list_empty(&cache_l2.table.list)) /* No data to flush */
		return ret;

	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
		g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
		FTL_Write_IN_Progress_Block_Table_Page();
	}

	/* For each logical block with pages parked in the L2 cache:
	 * read the block's current contents into cache_l2_blk_buf,
	 * overlay the cached pages, then write the merged block back
	 * to a freshly replaced physical block. */
	list_for_each(p, &cache_l2.table.list) {
		pnd = list_entry(p, struct spectra_l2_cache_list, list);
		if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
			IS_BAD_BLOCK(pnd->logical_blk_num) ||
			IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
			/* No valid data on flash for this block;
			 * start from an all-0xFF buffer */
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
			memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
		} else {
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
				phy_blk, 0, DeviceInfo.wPagesPerBlock);
			if (ret == FAIL) {
				printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
			}
		}

		/* Overlay every mapped page from the L2 cache blocks;
		 * map entry: high 16 bits = L2 block index, low 16 bits
		 * = page within that block */
		for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
			if (pnd->pages_array[i] != MAX_U32_VALUE) {
				l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
				l2_page = pnd->pages_array[i] & 0xffff;
				ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
				if (ret == FAIL) {
					printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
				}
				memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
			}
		}

		/* Find a free block and tag the original block as discarded */
		addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
		ret = FTL_Replace_Block(addr);
		if (ret == FAIL) {
			printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
		}

		/* Write back the updated data into NAND */
		phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
		if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
			nand_dbg_print(NAND_DBG_WARN,
				"Program NAND block %d fail in %s, Line %d\n",
				phy_blk, __FILE__, __LINE__);
			/* This may not be really a bad block. So just tag it as discarded. */
			/* Then it has a chance to be erased when garbage collection. */
			/* If it is really bad, then the erase will fail and it will be marked */
			/* as bad then. Otherwise it will be marked as free and can be used again */
			MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
			/* Find another free block and write it again */
			FTL_Replace_Block(addr);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
				printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
					"Some data will be lost!\n", phy_blk);
				MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
			}
		} else {
			/* tag the new free block as used block */
			pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
		}
	}

	/* Destroy the L2 Cache table and free the memory of all nodes */
	list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
		list_del(&pnd->list);
		kfree(pnd);
	}

	/* Erase discard L2 cache blocks */
	if (erase_l2_cache_blocks() != PASS)
		nand_dbg_print(NAND_DBG_WARN,
			" Erase L2 cache blocks error in %s, Line %d\n",
			__FILE__, __LINE__);

	/* Init the Level2 Cache data structure */
	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
		cache_l2.blk_array[i] = MAX_U32_VALUE;
	cache_l2.cur_blk_idx = 0;
	cache_l2.cur_page_num = 0;
	INIT_LIST_HEAD(&cache_l2.table.list);
	cache_l2.table.logical_blk_num = MAX_U32_VALUE;

	return ret;
}
1843
1844 /*
1845 * Write back a changed victim cache item to the Level2 Cache
1846 * and update the L2 Cache table to map the change.
1847 * If the L2 Cache is full, then start to do the L2 Cache flush.
1848 */
1849 static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
1850 {
1851 u32 logical_blk_num;
1852 u16 logical_page_num;
1853 struct list_head *p;
1854 struct spectra_l2_cache_list *pnd, *pnd_new;
1855 u32 node_size;
1856 int i, found;
1857
1858 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
1859 __FILE__, __LINE__, __func__);
1860
1861 /*
1862 * If Level2 Cache table is empty, then it means either:
1863 * 1. This is the first time that the function called after FTL_init
1864 * or
1865 * 2. The Level2 Cache has just been flushed
1866 *
1867 * So, 'steal' some free blocks from NAND for L2 Cache using
1868 * by just mask them as discard in the block table
1869 */
1870 if (list_empty(&cache_l2.table.list)) {
1871 BUG_ON(cache_l2.cur_blk_idx != 0);
1872 BUG_ON(cache_l2.cur_page_num!= 0);
1873 BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
1874 if (FAIL == get_l2_cache_blks()) {
1875 GLOB_FTL_Garbage_Collection();
1876 if (FAIL == get_l2_cache_blks()) {
1877 printk(KERN_ALERT "Fail to get L2 cache blks!\n");
1878 return FAIL;
1879 }
1880 }
1881 }
1882
1883 logical_blk_num = BLK_FROM_ADDR(logical_addr);
1884 logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1885 BUG_ON(logical_blk_num == MAX_U32_VALUE);
1886
1887 /* Write the cache item data into the current position of L2 Cache */
1888 #if CMD_DMA
1889 /*
1890 * TODO
1891 */
1892 #else
1893 if (FAIL == GLOB_LLD_Write_Page_Main(buf,
1894 cache_l2.blk_array[cache_l2.cur_blk_idx],
1895 cache_l2.cur_page_num, 1)) {
1896 nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
1897 "%s, Line %d, new Bad Block %d generated!\n",
1898 __FILE__, __LINE__,
1899 cache_l2.blk_array[cache_l2.cur_blk_idx]);
1900
1901 /* TODO: tag the current block as bad and try again */
1902
1903 return FAIL;
1904 }
1905 #endif
1906
1907 /*
1908 * Update the L2 Cache table.
1909 *
1910 * First seaching in the table to see whether the logical block
1911 * has been mapped. If not, then kmalloc a new node for the
1912 * logical block, fill data, and then insert it to the list.
1913 * Otherwise, just update the mapped node directly.
1914 */
1915 found = 0;
1916 list_for_each(p, &cache_l2.table.list) {
1917 pnd = list_entry(p, struct spectra_l2_cache_list, list);
1918 if (pnd->logical_blk_num == logical_blk_num) {
1919 pnd->pages_array[logical_page_num] =
1920 (cache_l2.cur_blk_idx << 16) |
1921 cache_l2.cur_page_num;
1922 found = 1;
1923 break;
1924 }
1925 }
1926 if (!found) { /* Create new node for the logical block here */
1927
1928 /* The logical pages to physical pages map array is
1929 * located at the end of struct spectra_l2_cache_list.
1930 */
1931 node_size = sizeof(struct spectra_l2_cache_list) +
1932 sizeof(u32) * DeviceInfo.wPagesPerBlock;
1933 pnd_new = kmalloc(node_size, GFP_ATOMIC);
1934 if (!pnd_new) {
1935 printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
1936 __FILE__, __LINE__);
1937 /*
1938 * TODO: Need to flush all the L2 cache into NAND ASAP
1939 * since no memory available here
1940 */
1941 }
1942 pnd_new->logical_blk_num = logical_blk_num;
1943 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
1944 pnd_new->pages_array[i] = MAX_U32_VALUE;
1945 pnd_new->pages_array[logical_page_num] =
1946 (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
1947 list_add(&pnd_new->list, &cache_l2.table.list);
1948 }
1949
1950 /* Increasing the current position pointer of the L2 Cache */
1951 cache_l2.cur_page_num++;
1952 if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
1953 cache_l2.cur_blk_idx++;
1954 if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
1955 /* The L2 Cache is full. Need to flush it now */
1956 nand_dbg_print(NAND_DBG_WARN,
1957 "L2 Cache is full, will start to flush it\n");
1958 flush_l2_cache();
1959 } else {
1960 cache_l2.cur_page_num = 0;
1961 }
1962 }
1963
1964 return PASS;
1965 }
1966
1967 /*
1968 * Seach in the Level2 Cache table to find the cache item.
1969 * If find, read the data from the NAND page of L2 Cache,
1970 * Otherwise, return FAIL.
1971 */
1972 static int search_l2_cache(u8 *buf, u64 logical_addr)
1973 {
1974 u32 logical_blk_num;
1975 u16 logical_page_num;
1976 struct list_head *p;
1977 struct spectra_l2_cache_list *pnd;
1978 u32 tmp = MAX_U32_VALUE;
1979 u32 phy_blk;
1980 u16 phy_page;
1981 int ret = FAIL;
1982
1983 logical_blk_num = BLK_FROM_ADDR(logical_addr);
1984 logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1985
1986 list_for_each(p, &cache_l2.table.list) {
1987 pnd = list_entry(p, struct spectra_l2_cache_list, list);
1988 if (pnd->logical_blk_num == logical_blk_num) {
1989 tmp = pnd->pages_array[logical_page_num];
1990 break;
1991 }
1992 }
1993
1994 if (tmp != MAX_U32_VALUE) { /* Found valid map */
1995 phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
1996 phy_page = tmp & 0xFFFF;
1997 #if CMD_DMA
1998 /* TODO */
1999 #else
2000 ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
2001 #endif
2002 }
2003
2004 return ret;
2005 }
2006
2007 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2008 * Function: FTL_Cache_Write_Back
2009 * Inputs: pointer to data cached in sys memory
2010 * address of free block in flash
2011 * Outputs: PASS=0 / FAIL=1
2012 * Description: writes all the pages of Cache Block to flash
2013 *
2014 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2015 static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
2016 {
2017 int i, j, iErase;
2018 u64 old_page_addr, addr, phy_addr;
2019 u32 *pbt = (u32 *)g_pBlockTable;
2020 u32 lba;
2021
2022 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2023 __FILE__, __LINE__, __func__);
2024
2025 old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
2026 GLOB_u64_Remainder(blk_addr, 2);
2027
2028 iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;
2029
2030 pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);
2031
2032 #if CMD_DMA
2033 p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
2034 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2035
2036 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
2037 p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
2038 DeviceInfo.nBitsInBlockDataSize);
2039 p_BTableChangesDelta->BT_Entry_Value =
2040 pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
2041 p_BTableChangesDelta->ValidFields = 0x0C;
2042 #endif
2043
2044 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
2045 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2046 FTL_Write_IN_Progress_Block_Table_Page();
2047 }
2048
2049 for (i = 0; i < RETRY_TIMES; i++) {
2050 if (PASS == iErase) {
2051 phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
2052 if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
2053 lba = BLK_FROM_ADDR(blk_addr);
2054 MARK_BLOCK_AS_BAD(pbt[lba]);
2055 i = RETRY_TIMES;
2056 break;
2057 }
2058 }
2059
2060 for (j = 0; j < CACHE_ITEM_NUM; j++) {
2061 addr = Cache.array[j].address;
2062 if ((addr <= blk_addr) &&
2063 ((addr + Cache.cache_item_size) > blk_addr))
2064 cache_block_to_write = j;
2065 }
2066
2067 phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
2068 if (PASS == FTL_Cache_Update_Block(pData,
2069 old_page_addr, phy_addr)) {
2070 cache_block_to_write = UNHIT_CACHE_ITEM;
2071 break;
2072 } else {
2073 iErase = PASS;
2074 }
2075 }
2076
2077 if (i >= RETRY_TIMES) {
2078 if (ERR == FTL_Flash_Error_Handle(pData,
2079 old_page_addr, blk_addr))
2080 return ERR;
2081 else
2082 return FAIL;
2083 }
2084
2085 return PASS;
2086 }
2087
2088 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2089 * Function: FTL_Cache_Write_Page
2090 * Inputs: Pointer to buffer, page address, cache block number
2091 * Outputs: PASS=0 / FAIL=1
2092 * Description: It writes the data in Cache Block
2093 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2094 static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
2095 u8 cache_blk, u16 flag)
2096 {
2097 u8 *pDest;
2098 u64 addr;
2099
2100 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2101 __FILE__, __LINE__, __func__);
2102
2103 addr = Cache.array[cache_blk].address;
2104 pDest = Cache.array[cache_blk].buf;
2105
2106 pDest += (unsigned long)(page_addr - addr);
2107 Cache.array[cache_blk].changed = SET;
2108 #if CMD_DMA
2109 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2110 int_cache[ftl_cmd_cnt].item = cache_blk;
2111 int_cache[ftl_cmd_cnt].cache.address =
2112 Cache.array[cache_blk].address;
2113 int_cache[ftl_cmd_cnt].cache.changed =
2114 Cache.array[cache_blk].changed;
2115 #endif
2116 GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
2117 ftl_cmd_cnt++;
2118 #else
2119 memcpy(pDest, pData, DeviceInfo.wPageDataSize);
2120 #endif
2121 if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
2122 Cache.array[cache_blk].use_cnt++;
2123 }
2124
2125 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2126 * Function: FTL_Cache_Write
2127 * Inputs: none
2128 * Outputs: PASS=0 / FAIL=1
2129 * Description: It writes least frequently used Cache block to flash if it
2130 * has been changed
2131 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2132 static int FTL_Cache_Write(void)
2133 {
2134 int i, bResult = PASS;
2135 u16 bNO, least_count = 0xFFFF;
2136
2137 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2138 __FILE__, __LINE__, __func__);
2139
2140 FTL_Calculate_LRU();
2141
2142 bNO = Cache.LRU;
2143 nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
2144 "Least used cache block is %d\n", bNO);
2145
2146 if (Cache.array[bNO].changed != SET)
2147 return bResult;
2148
2149 nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
2150 " Block %d containing logical block %d is dirty\n",
2151 bNO,
2152 (u32)(Cache.array[bNO].address >>
2153 DeviceInfo.nBitsInBlockDataSize));
2154 #if CMD_DMA
2155 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2156 int_cache[ftl_cmd_cnt].item = bNO;
2157 int_cache[ftl_cmd_cnt].cache.address =
2158 Cache.array[bNO].address;
2159 int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
2160 #endif
2161 #endif
2162 bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
2163 Cache.array[bNO].address);
2164 if (bResult != ERR)
2165 Cache.array[bNO].changed = CLEAR;
2166
2167 least_count = Cache.array[bNO].use_cnt;
2168
2169 for (i = 0; i < CACHE_ITEM_NUM; i++) {
2170 if (i == bNO)
2171 continue;
2172 if (Cache.array[i].use_cnt > 0)
2173 Cache.array[i].use_cnt -= least_count;
2174 }
2175
2176 return bResult;
2177 }
2178
2179 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2180 * Function: FTL_Cache_Read
2181 * Inputs: Page address
2182 * Outputs: PASS=0 / FAIL=1
2183 * Description: It reads the block from device in Cache Block
2184 * Set the LRU count to 1
2185 * Mark the Cache Block as clean
2186 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2187 static int FTL_Cache_Read(u64 logical_addr)
2188 {
2189 u64 item_addr, phy_addr;
2190 u16 num;
2191 int ret;
2192
2193 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2194 __FILE__, __LINE__, __func__);
2195
2196 num = Cache.LRU; /* The LRU cache item will be overwritten */
2197
2198 item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
2199 Cache.cache_item_size;
2200 Cache.array[num].address = item_addr;
2201 Cache.array[num].use_cnt = 1;
2202 Cache.array[num].changed = CLEAR;
2203
2204 #if CMD_DMA
2205 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2206 int_cache[ftl_cmd_cnt].item = num;
2207 int_cache[ftl_cmd_cnt].cache.address =
2208 Cache.array[num].address;
2209 int_cache[ftl_cmd_cnt].cache.changed =
2210 Cache.array[num].changed;
2211 #endif
2212 #endif
2213 /*
2214 * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
2215 * Otherwise, read it from NAND
2216 */
2217 ret = search_l2_cache(Cache.array[num].buf, logical_addr);
2218 if (PASS == ret) /* Hit in L2 Cache */
2219 return ret;
2220
2221 /* Compute the physical start address of NAND device according to */
2222 /* the logical start address of the cache item (LRU cache item) */
2223 phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
2224 GLOB_u64_Remainder(item_addr, 2);
2225
2226 return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
2227 }
2228
2229 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2230 * Function: FTL_Check_Block_Table
2231 * Inputs: ?
2232 * Outputs: PASS=0 / FAIL=1
2233 * Description: It checks the correctness of each block table entry
2234 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2235 static int FTL_Check_Block_Table(int wOldTable)
2236 {
2237 u32 i;
2238 int wResult = PASS;
2239 u32 blk_idx;
2240 u32 *pbt = (u32 *)g_pBlockTable;
2241 u8 *pFlag = flag_check_blk_table;
2242
2243 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2244 __FILE__, __LINE__, __func__);
2245
2246 if (NULL != pFlag) {
2247 memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
2248 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
2249 blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
2250
2251 /*
2252 * 20081006/KBV - Changed to pFlag[i] reference
2253 * to avoid buffer overflow
2254 */
2255
2256 /*
2257 * 2008-10-20 Yunpeng Note: This change avoid
2258 * buffer overflow, but changed function of
2259 * the code, so it should be re-write later
2260 */
2261 if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
2262 PASS == pFlag[i]) {
2263 wResult = FAIL;
2264 break;
2265 } else {
2266 pFlag[i] = PASS;
2267 }
2268 }
2269 }
2270
2271 return wResult;
2272 }
2273
2274
2275 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2276 * Function: FTL_Write_Block_Table
2277 * Inputs: flasg
2278 * Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
2279 * happen. -1 Error
2280 * Description: It writes the block table
2281 * Block table always mapped to LBA 0 which inturn mapped
2282 * to any physical block
2283 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2284 static int FTL_Write_Block_Table(int wForce)
2285 {
2286 u32 *pbt = (u32 *)g_pBlockTable;
2287 int wSuccess = PASS;
2288 u32 wTempBlockTableIndex;
2289 u16 bt_pages, new_bt_offset;
2290 u8 blockchangeoccured = 0;
2291
2292 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2293 __FILE__, __LINE__, __func__);
2294
2295 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2296
2297 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
2298 return 0;
2299
2300 if (PASS == wForce) {
2301 g_wBlockTableOffset =
2302 (u16)(DeviceInfo.wPagesPerBlock - bt_pages);
2303 #if CMD_DMA
2304 p_BTableChangesDelta =
2305 (struct BTableChangesDelta *)g_pBTDelta_Free;
2306 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2307
2308 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
2309 p_BTableChangesDelta->g_wBlockTableOffset =
2310 g_wBlockTableOffset;
2311 p_BTableChangesDelta->ValidFields = 0x01;
2312 #endif
2313 }
2314
2315 nand_dbg_print(NAND_DBG_DEBUG,
2316 "Inside FTL_Write_Block_Table: block %d Page:%d\n",
2317 g_wBlockTableIndex, g_wBlockTableOffset);
2318
2319 do {
2320 new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
2321 if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
2322 (new_bt_offset > DeviceInfo.wPagesPerBlock) ||
2323 (FAIL == wSuccess)) {
2324 wTempBlockTableIndex = FTL_Replace_Block_Table();
2325 if (BAD_BLOCK == wTempBlockTableIndex)
2326 return ERR;
2327 if (!blockchangeoccured) {
2328 bt_block_changed = 1;
2329 blockchangeoccured = 1;
2330 }
2331
2332 g_wBlockTableIndex = wTempBlockTableIndex;
2333 g_wBlockTableOffset = 0;
2334 pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
2335 #if CMD_DMA
2336 p_BTableChangesDelta =
2337 (struct BTableChangesDelta *)g_pBTDelta_Free;
2338 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2339
2340 p_BTableChangesDelta->ftl_cmd_cnt =
2341 ftl_cmd_cnt;
2342 p_BTableChangesDelta->g_wBlockTableOffset =
2343 g_wBlockTableOffset;
2344 p_BTableChangesDelta->g_wBlockTableIndex =
2345 g_wBlockTableIndex;
2346 p_BTableChangesDelta->ValidFields = 0x03;
2347
2348 p_BTableChangesDelta =
2349 (struct BTableChangesDelta *)g_pBTDelta_Free;
2350 g_pBTDelta_Free +=
2351 sizeof(struct BTableChangesDelta);
2352
2353 p_BTableChangesDelta->ftl_cmd_cnt =
2354 ftl_cmd_cnt;
2355 p_BTableChangesDelta->BT_Index =
2356 BLOCK_TABLE_INDEX;
2357 p_BTableChangesDelta->BT_Entry_Value =
2358 pbt[BLOCK_TABLE_INDEX];
2359 p_BTableChangesDelta->ValidFields = 0x0C;
2360 #endif
2361 }
2362
2363 wSuccess = FTL_Write_Block_Table_Data();
2364 if (FAIL == wSuccess)
2365 MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
2366 } while (FAIL == wSuccess);
2367
2368 g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
2369
2370 return 1;
2371 }
2372
2373 /******************************************************************
2374 * Function: GLOB_FTL_Flash_Format
2375 * Inputs: none
2376 * Outputs: PASS
2377 * Description: The block table stores bad block info, including MDF+
2378 * blocks gone bad over the ages. Therefore, if we have a
2379 * block table in place, then use it to scan for bad blocks
2380 * If not, then scan for MDF.
2381 * Now, a block table will only be found if spectra was already
2382 * being used. For a fresh flash, we'll go thru scanning for
2383 * MDF. If spectra was being used, then there is a chance that
2384 * the MDF has been corrupted. Spectra avoids writing to the
2385 * first 2 bytes of the spare area to all pages in a block. This
2386 * covers all known flash devices. However, since flash
2387 * manufacturers have no standard of where the MDF is stored,
2388 * this cannot guarantee that the MDF is protected for future
2389 * devices too. The initial scanning for the block table assures
2390 * this. It is ok even if the block table is outdated, as all
2391 * we're looking for are bad block markers.
2392 * Use this when mounting a file system or starting a
2393 * new flash.
2394 *
2395 *********************************************************************/
2396 static int FTL_Format_Flash(u8 valid_block_table)
2397 {
2398 u32 i, j;
2399 u32 *pbt = (u32 *)g_pBlockTable;
2400 u32 tempNode;
2401 int ret;
2402
2403 #if CMD_DMA
2404 u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
2405 if (ftl_cmd_cnt)
2406 return FAIL;
2407 #endif
2408
2409 if (FAIL == FTL_Check_Block_Table(FAIL))
2410 valid_block_table = 0;
2411
2412 if (valid_block_table) {
2413 u8 switched = 1;
2414 u32 block, k;
2415
2416 k = DeviceInfo.wSpectraStartBlock;
2417 while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
2418 switched = 0;
2419 k++;
2420 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2421 j <= DeviceInfo.wSpectraEndBlock;
2422 j++, i++) {
2423 block = (pbt[i] & ~BAD_BLOCK) -
2424 DeviceInfo.wSpectraStartBlock;
2425 if (block != i) {
2426 switched = 1;
2427 tempNode = pbt[i];
2428 pbt[i] = pbt[block];
2429 pbt[block] = tempNode;
2430 }
2431 }
2432 }
2433 if ((k == DeviceInfo.wSpectraEndBlock) && switched)
2434 valid_block_table = 0;
2435 }
2436
2437 if (!valid_block_table) {
2438 memset(g_pBlockTable, 0,
2439 DeviceInfo.wDataBlockNum * sizeof(u32));
2440 memset(g_pWearCounter, 0,
2441 DeviceInfo.wDataBlockNum * sizeof(u8));
2442 if (DeviceInfo.MLCDevice)
2443 memset(g_pReadCounter, 0,
2444 DeviceInfo.wDataBlockNum * sizeof(u16));
2445 #if CMD_DMA
2446 memset(g_pBTStartingCopy, 0,
2447 DeviceInfo.wDataBlockNum * sizeof(u32));
2448 memset(g_pWearCounterCopy, 0,
2449 DeviceInfo.wDataBlockNum * sizeof(u8));
2450 if (DeviceInfo.MLCDevice)
2451 memset(g_pReadCounterCopy, 0,
2452 DeviceInfo.wDataBlockNum * sizeof(u16));
2453 #endif
2454 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2455 j <= DeviceInfo.wSpectraEndBlock;
2456 j++, i++) {
2457 if (GLOB_LLD_Get_Bad_Block((u32)j))
2458 pbt[i] = (u32)(BAD_BLOCK | j);
2459 }
2460 }
2461
2462 nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");
2463
2464 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2465 j <= DeviceInfo.wSpectraEndBlock;
2466 j++, i++) {
2467 if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
2468 ret = GLOB_LLD_Erase_Block(j);
2469 if (FAIL == ret) {
2470 pbt[i] = (u32)(j);
2471 MARK_BLOCK_AS_BAD(pbt[i]);
2472 nand_dbg_print(NAND_DBG_WARN,
2473 "NAND Program fail in %s, Line %d, "
2474 "Function: %s, new Bad Block %d generated!\n",
2475 __FILE__, __LINE__, __func__, (int)j);
2476 } else {
2477 pbt[i] = (u32)(SPARE_BLOCK | j);
2478 }
2479 }
2480 #if CMD_DMA
2481 pbtStartingCopy[i] = pbt[i];
2482 #endif
2483 }
2484
2485 g_wBlockTableOffset = 0;
2486 for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
2487 DeviceInfo.wSpectraStartBlock))
2488 && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
2489 ;
2490 if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
2491 printk(KERN_ERR "All blocks bad!\n");
2492 return FAIL;
2493 } else {
2494 g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
2495 if (i != BLOCK_TABLE_INDEX) {
2496 tempNode = pbt[i];
2497 pbt[i] = pbt[BLOCK_TABLE_INDEX];
2498 pbt[BLOCK_TABLE_INDEX] = tempNode;
2499 }
2500 }
2501 pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
2502
2503 #if CMD_DMA
2504 pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
2505 #endif
2506
2507 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2508 memset(g_pBTBlocks, 0xFF,
2509 (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
2510 g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
2511 FTL_Write_Block_Table(FAIL);
2512
2513 for (i = 0; i < CACHE_ITEM_NUM; i++) {
2514 Cache.array[i].address = NAND_CACHE_INIT_ADDR;
2515 Cache.array[i].use_cnt = 0;
2516 Cache.array[i].changed = CLEAR;
2517 }
2518
2519 #if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
2520 memcpy((void *)&cache_start_copy, (void *)&Cache,
2521 sizeof(struct flash_cache_tag));
2522 #endif
2523 return PASS;
2524 }
2525
2526 static int force_format_nand(void)
2527 {
2528 u32 i;
2529
2530 /* Force erase the whole unprotected physical partiton of NAND */
2531 printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
2532 printk(KERN_ALERT "From phyical block %d to %d\n",
2533 DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
2534 for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
2535 if (GLOB_LLD_Erase_Block(i))
2536 printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
2537 }
2538 printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
2539 while(1);
2540
2541 return PASS;
2542 }
2543
/*
 * GLOB_FTL_Flash_Format - public entry point for formatting the device.
 *
 * Currently this performs a destructive force erase of the whole
 * unprotected NAND partition via force_format_nand(), which never
 * returns (it spins until the system is rebooted). The regular,
 * block-table-aware path would be FTL_Format_Flash(1).
 */
int GLOB_FTL_Flash_Format(void)
{
	return force_format_nand();
}
2550
2551 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2552 * Function: FTL_Search_Block_Table_IN_Block
2553 * Inputs: Block Number
2554 * Pointer to page
2555 * Outputs: PASS / FAIL
2556 * Page contatining the block table
2557 * Description: It searches the block table in the block
2558 * passed as an argument.
2559 *
2560 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2561 static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
2562 u8 BT_Tag, u16 *Page)
2563 {
2564 u16 i, j, k;
2565 u16 Result = PASS;
2566 u16 Last_IPF = 0;
2567 u8 BT_Found = 0;
2568 u8 *tagarray;
2569 u8 *tempbuf = tmp_buf_search_bt_in_block;
2570 u8 *pSpareBuf = spare_buf_search_bt_in_block;
2571 u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
2572 u8 bt_flag_last_page = 0xFF;
2573 u8 search_in_previous_pages = 0;
2574 u16 bt_pages;
2575
2576 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2577 __FILE__, __LINE__, __func__);
2578
2579 nand_dbg_print(NAND_DBG_DEBUG,
2580 "Searching block table in %u block\n",
2581 (unsigned int)BT_Block);
2582
2583 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2584
2585 for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
2586 i += (bt_pages + 1)) {
2587 nand_dbg_print(NAND_DBG_DEBUG,
2588 "Searching last IPF: %d\n", i);
2589 Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
2590 BT_Block, i, 1);
2591
2592 if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
2593 if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
2594 continue;
2595 } else {
2596 search_in_previous_pages = 1;
2597 Last_IPF = i;
2598 }
2599 }
2600
2601 if (!search_in_previous_pages) {
2602 if (i != bt_pages) {
2603 i -= (bt_pages + 1);
2604 Last_IPF = i;
2605 }
2606 }
2607
2608 if (0 == Last_IPF)
2609 break;
2610
2611 if (!search_in_previous_pages) {
2612 i = i + 1;
2613 nand_dbg_print(NAND_DBG_DEBUG,
2614 "Reading the spare area of Block %u Page %u",
2615 (unsigned int)BT_Block, i);
2616 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
2617 BT_Block, i, 1);
2618 nand_dbg_print(NAND_DBG_DEBUG,
2619 "Reading the spare area of Block %u Page %u",
2620 (unsigned int)BT_Block, i + bt_pages - 1);
2621 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2622 BT_Block, i + bt_pages - 1, 1);
2623
2624 k = 0;
2625 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2626 if (j) {
2627 for (; k < j; k++) {
2628 if (tagarray[k] == BT_Tag)
2629 break;
2630 }
2631 }
2632
2633 if (k < j)
2634 bt_flag = tagarray[k];
2635 else
2636 Result = FAIL;
2637
2638 if (Result == PASS) {
2639 k = 0;
2640 j = FTL_Extract_Block_Table_Tag(
2641 pSpareBufBTLastPage, &tagarray);
2642 if (j) {
2643 for (; k < j; k++) {
2644 if (tagarray[k] == BT_Tag)
2645 break;
2646 }
2647 }
2648
2649 if (k < j)
2650 bt_flag_last_page = tagarray[k];
2651 else
2652 Result = FAIL;
2653
2654 if (Result == PASS) {
2655 if (bt_flag == bt_flag_last_page) {
2656 nand_dbg_print(NAND_DBG_DEBUG,
2657 "Block table is found"
2658 " in page after IPF "
2659 "at block %d "
2660 "page %d\n",
2661 (int)BT_Block, i);
2662 BT_Found = 1;
2663 *Page = i;
2664 g_cBlockTableStatus =
2665 CURRENT_BLOCK_TABLE;
2666 break;
2667 } else {
2668 Result = FAIL;
2669 }
2670 }
2671 }
2672 }
2673
2674 if (search_in_previous_pages)
2675 i = i - bt_pages;
2676 else
2677 i = i - (bt_pages + 1);
2678
2679 Result = PASS;
2680
2681 nand_dbg_print(NAND_DBG_DEBUG,
2682 "Reading the spare area of Block %d Page %d",
2683 (int)BT_Block, i);
2684
2685 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2686 nand_dbg_print(NAND_DBG_DEBUG,
2687 "Reading the spare area of Block %u Page %u",
2688 (unsigned int)BT_Block, i + bt_pages - 1);
2689
2690 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2691 BT_Block, i + bt_pages - 1, 1);
2692
2693 k = 0;
2694 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2695 if (j) {
2696 for (; k < j; k++) {
2697 if (tagarray[k] == BT_Tag)
2698 break;
2699 }
2700 }
2701
2702 if (k < j)
2703 bt_flag = tagarray[k];
2704 else
2705 Result = FAIL;
2706
2707 if (Result == PASS) {
2708 k = 0;
2709 j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2710 &tagarray);
2711 if (j) {
2712 for (; k < j; k++) {
2713 if (tagarray[k] == BT_Tag)
2714 break;
2715 }
2716 }
2717
2718 if (k < j) {
2719 bt_flag_last_page = tagarray[k];
2720 } else {
2721 Result = FAIL;
2722 break;
2723 }
2724
2725 if (Result == PASS) {
2726 if (bt_flag == bt_flag_last_page) {
2727 nand_dbg_print(NAND_DBG_DEBUG,
2728 "Block table is found "
2729 "in page prior to IPF "
2730 "at block %u page %d\n",
2731 (unsigned int)BT_Block, i);
2732 BT_Found = 1;
2733 *Page = i;
2734 g_cBlockTableStatus =
2735 IN_PROGRESS_BLOCK_TABLE;
2736 break;
2737 } else {
2738 Result = FAIL;
2739 break;
2740 }
2741 }
2742 }
2743 }
2744
2745 if (Result == FAIL) {
2746 if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
2747 BT_Found = 1;
2748 *Page = i - (bt_pages + 1);
2749 }
2750 if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
2751 goto func_return;
2752 }
2753
2754 if (Last_IPF == 0) {
2755 i = 0;
2756 Result = PASS;
2757 nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
2758 "Block %u Page %u", (unsigned int)BT_Block, i);
2759
2760 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2761 nand_dbg_print(NAND_DBG_DEBUG,
2762 "Reading the spare area of Block %u Page %u",
2763 (unsigned int)BT_Block, i + bt_pages - 1);
2764 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2765 BT_Block, i + bt_pages - 1, 1);
2766
2767 k = 0;
2768 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2769 if (j) {
2770 for (; k < j; k++) {
2771 if (tagarray[k] == BT_Tag)
2772 break;
2773 }
2774 }
2775
2776 if (k < j)
2777 bt_flag = tagarray[k];
2778 else
2779 Result = FAIL;
2780
2781 if (Result == PASS) {
2782 k = 0;
2783 j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2784 &tagarray);
2785 if (j) {
2786 for (; k < j; k++) {
2787 if (tagarray[k] == BT_Tag)
2788 break;
2789 }
2790 }
2791
2792 if (k < j)
2793 bt_flag_last_page = tagarray[k];
2794 else
2795 Result = FAIL;
2796
2797 if (Result == PASS) {
2798 if (bt_flag == bt_flag_last_page) {
2799 nand_dbg_print(NAND_DBG_DEBUG,
2800 "Block table is found "
2801 "in page after IPF at "
2802 "block %u page %u\n",
2803 (unsigned int)BT_Block,
2804 (unsigned int)i);
2805 BT_Found = 1;
2806 *Page = i;
2807 g_cBlockTableStatus =
2808 CURRENT_BLOCK_TABLE;
2809 goto func_return;
2810 } else {
2811 Result = FAIL;
2812 }
2813 }
2814 }
2815
2816 if (Result == FAIL)
2817 goto func_return;
2818 }
2819 func_return:
2820 return Result;
2821 }
2822
/* Start address of the in-memory block table (g_pBlockTable) */
u8 *get_blk_table_start_addr(void)
{
	return g_pBlockTable;
}
2827
/* Size in bytes of the block table: one u32 entry per data block */
unsigned long get_blk_table_len(void)
{
	return DeviceInfo.wDataBlockNum * sizeof(u32);
}
2832
/* Start address of the in-memory wear-leveling counter table */
u8 *get_wear_leveling_table_start_addr(void)
{
	return g_pWearCounter;
}
2837
/* Size in bytes of the wear counter table: one u8 per data block */
unsigned long get_wear_leveling_table_len(void)
{
	return DeviceInfo.wDataBlockNum * sizeof(u8);
}
2842
2843 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2844 * Function: FTL_Read_Block_Table
2845 * Inputs: none
2846 * Outputs: PASS / FAIL
2847 * Description: read the flash spare area and find a block containing the
2848 * most recent block table(having largest block_table_counter).
2849 * Find the last written Block table in this block.
2850 * Check the correctness of Block Table
2851 * If CDMA is enabled, this function is called in
2852 * polling mode.
2853 * We don't need to store changes in Block table in this
2854 * function as it is called only at initialization
2855 *
2856 * Note: Currently this function is called at initialization
2857 * before any read/erase/write command issued to flash so,
2858 * there is no need to wait for CDMA list to complete as of now
2859 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2860 static int FTL_Read_Block_Table(void)
2861 {
2862 u16 i = 0;
2863 int k, j;
2864 u8 *tempBuf, *tagarray;
2865 int wResult = FAIL;
2866 int status = FAIL;
2867 u8 block_table_found = 0;
2868 int search_result;
2869 u32 Block;
2870 u16 Page = 0;
2871 u16 PageCount;
2872 u16 bt_pages;
2873 int wBytesCopied = 0, tempvar;
2874
2875 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2876 __FILE__, __LINE__, __func__);
2877
2878 tempBuf = tmp_buf1_read_blk_table;
2879 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2880
2881 for (j = DeviceInfo.wSpectraStartBlock;
2882 j <= (int)DeviceInfo.wSpectraEndBlock;
2883 j++) {
2884 status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
2885 k = 0;
2886 i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
2887 if (i) {
2888 status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
2889 j, 0, 1);
2890 for (; k < i; k++) {
2891 if (tagarray[k] == tempBuf[3])
2892 break;
2893 }
2894 }
2895
2896 if (k < i)
2897 k = tagarray[k];
2898 else
2899 continue;
2900
2901 nand_dbg_print(NAND_DBG_DEBUG,
2902 "Block table is contained in Block %d %d\n",
2903 (unsigned int)j, (unsigned int)k);
2904
2905 if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
2906 g_pBTBlocks[k-FIRST_BT_ID] = j;
2907 block_table_found = 1;
2908 } else {
2909 printk(KERN_ERR "FTL_Read_Block_Table -"
2910 "This should never happens. "
2911 "Two block table have same counter %u!\n", k);
2912 }
2913 }
2914
2915 if (block_table_found) {
2916 if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
2917 g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
2918 j = LAST_BT_ID;
2919 while ((j > FIRST_BT_ID) &&
2920 (g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
2921 j--;
2922 if (j == FIRST_BT_ID) {
2923 j = LAST_BT_ID;
2924 last_erased = LAST_BT_ID;
2925 } else {
2926 last_erased = (u8)j + 1;
2927 while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
2928 g_pBTBlocks[j - FIRST_BT_ID]))
2929 j--;
2930 }
2931 } else {
2932 j = FIRST_BT_ID;
2933 while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
2934 j++;
2935 last_erased = (u8)j;
2936 while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
2937 g_pBTBlocks[j - FIRST_BT_ID]))
2938 j++;
2939 if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
2940 j--;
2941 }
2942
2943 if (last_erased > j)
2944 j += (1 + LAST_BT_ID - FIRST_BT_ID);
2945
2946 for (; (j >= last_erased) && (FAIL == wResult); j--) {
2947 i = (j - FIRST_BT_ID) %
2948 (1 + LAST_BT_ID - FIRST_BT_ID);
2949 search_result =
2950 FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
2951 i + FIRST_BT_ID, &Page);
2952 if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
2953 block_table_found = 0;
2954
2955 while ((search_result == PASS) && (FAIL == wResult)) {
2956 nand_dbg_print(NAND_DBG_DEBUG,
2957 "FTL_Read_Block_Table:"
2958 "Block: %u Page: %u "
2959 "contains block table\n",
2960 (unsigned int)g_pBTBlocks[i],
2961 (unsigned int)Page);
2962
2963 tempBuf = tmp_buf2_read_blk_table;
2964
2965 for (k = 0; k < bt_pages; k++) {
2966 Block = g_pBTBlocks[i];
2967 PageCount = 1;
2968
2969 status =
2970 GLOB_LLD_Read_Page_Main_Polling(
2971 tempBuf, Block, Page, PageCount);
2972
2973 tempvar = k ? 0 : 4;
2974
2975 wBytesCopied +=
2976 FTL_Copy_Block_Table_From_Flash(
2977 tempBuf + tempvar,
2978 DeviceInfo.wPageDataSize - tempvar,
2979 wBytesCopied);
2980
2981 Page++;
2982 }
2983
2984 wResult = FTL_Check_Block_Table(FAIL);
2985 if (FAIL == wResult) {
2986 block_table_found = 0;
2987 if (Page > bt_pages)
2988 Page -= ((bt_pages<<1) + 1);
2989 else
2990 search_result = FAIL;
2991 }
2992 }
2993 }
2994 }
2995
2996 if (PASS == wResult) {
2997 if (!block_table_found)
2998 FTL_Execute_SPL_Recovery();
2999
3000 if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
3001 g_wBlockTableOffset = (u16)Page + 1;
3002 else
3003 g_wBlockTableOffset = (u16)Page - bt_pages;
3004
3005 g_wBlockTableIndex = (u32)g_pBTBlocks[i];
3006
3007 #if CMD_DMA
3008 if (DeviceInfo.MLCDevice)
3009 memcpy(g_pBTStartingCopy, g_pBlockTable,
3010 DeviceInfo.wDataBlockNum * sizeof(u32)
3011 + DeviceInfo.wDataBlockNum * sizeof(u8)
3012 + DeviceInfo.wDataBlockNum * sizeof(u16));
3013 else
3014 memcpy(g_pBTStartingCopy, g_pBlockTable,
3015 DeviceInfo.wDataBlockNum * sizeof(u32)
3016 + DeviceInfo.wDataBlockNum * sizeof(u8));
3017 #endif
3018 }
3019
3020 if (FAIL == wResult)
3021 printk(KERN_ERR "Yunpeng - "
3022 "Can not find valid spectra block table!\n");
3023
3024 #if AUTO_FORMAT_FLASH
3025 if (FAIL == wResult) {
3026 nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
3027 wResult = FTL_Format_Flash(0);
3028 }
3029 #endif
3030
3031 return wResult;
3032 }
3033
3034
3035 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3036 * Function: FTL_Flash_Error_Handle
3037 * Inputs: Pointer to data
3038 * Page address
3039 * Block address
3040 * Outputs: PASS=0 / FAIL=1
3041 * Description: It handles any error occured during Spectra operation
3042 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3043 static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
3044 u64 blk_addr)
3045 {
3046 u32 i;
3047 int j;
3048 u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
3049 u64 phy_addr;
3050 int wErase = FAIL;
3051 int wResult = FAIL;
3052 u32 *pbt = (u32 *)g_pBlockTable;
3053
3054 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3055 __FILE__, __LINE__, __func__);
3056
3057 if (ERR == GLOB_FTL_Garbage_Collection())
3058 return ERR;
3059
3060 do {
3061 for (i = DeviceInfo.wSpectraEndBlock -
3062 DeviceInfo.wSpectraStartBlock;
3063 i > 0; i--) {
3064 if (IS_SPARE_BLOCK(i)) {
3065 tmp_node = (u32)(BAD_BLOCK |
3066 pbt[blk_node]);
3067 pbt[blk_node] = (u32)(pbt[i] &
3068 (~SPARE_BLOCK));
3069 pbt[i] = tmp_node;
3070 #if CMD_DMA
3071 p_BTableChangesDelta =
3072 (struct BTableChangesDelta *)
3073 g_pBTDelta_Free;
3074 g_pBTDelta_Free +=
3075 sizeof(struct BTableChangesDelta);
3076
3077 p_BTableChangesDelta->ftl_cmd_cnt =
3078 ftl_cmd_cnt;
3079 p_BTableChangesDelta->BT_Index =
3080 blk_node;
3081 p_BTableChangesDelta->BT_Entry_Value =
3082 pbt[blk_node];
3083 p_BTableChangesDelta->ValidFields = 0x0C;
3084
3085 p_BTableChangesDelta =
3086 (struct BTableChangesDelta *)
3087 g_pBTDelta_Free;
3088 g_pBTDelta_Free +=
3089 sizeof(struct BTableChangesDelta);
3090
3091 p_BTableChangesDelta->ftl_cmd_cnt =
3092 ftl_cmd_cnt;
3093 p_BTableChangesDelta->BT_Index = i;
3094 p_BTableChangesDelta->BT_Entry_Value = pbt[i];
3095 p_BTableChangesDelta->ValidFields = 0x0C;
3096 #endif
3097 wResult = PASS;
3098 break;
3099 }
3100 }
3101
3102 if (FAIL == wResult) {
3103 if (FAIL == GLOB_FTL_Garbage_Collection())
3104 break;
3105 else
3106 continue;
3107 }
3108
3109 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
3110 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
3111 FTL_Write_IN_Progress_Block_Table_Page();
3112 }
3113
3114 phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
3115
3116 for (j = 0; j < RETRY_TIMES; j++) {
3117 if (PASS == wErase) {
3118 if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
3119 MARK_BLOCK_AS_BAD(pbt[blk_node]);
3120 break;
3121 }
3122 }
3123 if (PASS == FTL_Cache_Update_Block(pData,
3124 old_page_addr,
3125 phy_addr)) {
3126 wResult = PASS;
3127 break;
3128 } else {
3129 wResult = FAIL;
3130 wErase = PASS;
3131 }
3132 }
3133 } while (FAIL == wResult);
3134
3135 FTL_Write_Block_Table(FAIL);
3136
3137 return wResult;
3138 }
3139
3140 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3141 * Function: FTL_Get_Page_Num
3142 * Inputs: Size in bytes
3143 * Outputs: Size in pages
3144 * Description: It calculates the pages required for the length passed
3145 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3146 static u32 FTL_Get_Page_Num(u64 length)
3147 {
3148 return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
3149 (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
3150 }
3151
3152 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3153 * Function: FTL_Get_Physical_Block_Addr
3154 * Inputs: Block Address (byte format)
3155 * Outputs: Physical address of the block.
3156 * Description: It translates LBA to PBA by returning address stored
3157 * at the LBA location in the block table
3158 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3159 static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
3160 {
3161 u32 *pbt;
3162 u64 physical_addr;
3163
3164 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3165 __FILE__, __LINE__, __func__);
3166
3167 pbt = (u32 *)g_pBlockTable;
3168 physical_addr = (u64) DeviceInfo.wBlockDataSize *
3169 (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
3170
3171 return physical_addr;
3172 }
3173
3174 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3175 * Function: FTL_Get_Block_Index
3176 * Inputs: Physical Block no.
3177 * Outputs: Logical block no. /BAD_BLOCK
3178 * Description: It returns the logical block no. for the PBA passed
3179 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3180 static u32 FTL_Get_Block_Index(u32 wBlockNum)
3181 {
3182 u32 *pbt = (u32 *)g_pBlockTable;
3183 u32 i;
3184
3185 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3186 __FILE__, __LINE__, __func__);
3187
3188 for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
3189 if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
3190 return i;
3191
3192 return BAD_BLOCK;
3193 }
3194
3195 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3196 * Function: GLOB_FTL_Wear_Leveling
3197 * Inputs: none
3198 * Outputs: PASS=0
3199 * Description: This is static wear leveling (done by explicit call)
3200 * do complete static wear leveling
3201 * do complete garbage collection
3202 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3203 int GLOB_FTL_Wear_Leveling(void)
3204 {
3205 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3206 __FILE__, __LINE__, __func__);
3207
3208 FTL_Static_Wear_Leveling();
3209 GLOB_FTL_Garbage_Collection();
3210
3211 return PASS;
3212 }
3213
/* Scan the block table for the least-worn in-use data block and the
 * most-worn spare block, skipping bad blocks and blocks already flagged
 * in chg[]. Results are returned through the idx/cnt out-parameters,
 * which the caller must pre-seed (see FTL_Static_Wear_Leveling). */
static void find_least_most_worn(u8 *chg,
	u32 *least_idx, u8 *least_cnt,
	u32 *most_idx, u8 *most_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 idx;
	u8 cnt;
	int i;

	for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
		if (IS_BAD_BLOCK(i) || PASS == chg[i])
			continue;

		/* Physical block number with status bits stripped */
		idx = (u32) ((~BAD_BLOCK) & pbt[i]);
		cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];

		if (IS_SPARE_BLOCK(i)) {
			/* Candidate target: a heavily-worn free block */
			if (cnt > *most_cnt) {
				*most_cnt = cnt;
				*most_idx = idx;
			}
		}

		if (IS_DATA_BLOCK(i)) {
			/* Candidate source: a lightly-worn in-use block */
			if (cnt < *least_cnt) {
				*least_cnt = cnt;
				*least_idx = idx;
			}
		}

		/* NOTE(review): the "continue" below is the last statement
		 * of the loop body, so this branch only performs the two
		 * boundary checks — looks like leftover debug code. */
		if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
			debug_boundary_error(*most_idx,
				DeviceInfo.wDataBlockNum, 0);
			debug_boundary_error(*least_idx,
				DeviceInfo.wDataBlockNum, 0);
			continue;
		}
	}
}
3253
/* One step of static wear leveling: copy the contents of the
 * least-worn data block (*least_idx) into the most-worn spare block and
 * swap their block-table roles. (*rep_blk_num) counts replacements so
 * far; *result is set to FAIL if the copy cannot be completed.
 * Returns PASS to continue leveling, FAIL to stop. */
static int move_blks_for_wear_leveling(u8 *chg,
		u32 *least_idx, u32 *rep_blk_num, int *result)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 rep_blk;
	int j, ret_cp_blk, ret_erase;
	int ret = PASS;

	chg[*least_idx] = PASS;
	debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);

	rep_blk = FTL_Replace_MWBlock();
	if (rep_blk != BAD_BLOCK) {
		nand_dbg_print(NAND_DBG_DEBUG,
			"More than two spare blocks exist so do it\n");
		nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
				rep_blk);

		chg[rep_blk] = PASS;

		if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}

		/* Retry the data copy; on failure erase the target and
		 * try again, marking it bad if even the erase fails */
		for (j = 0; j < RETRY_TIMES; j++) {
			ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
				DeviceInfo.wBlockDataSize,
				(u64)rep_blk * DeviceInfo.wBlockDataSize);
			if (FAIL == ret_cp_blk) {
				ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
					* DeviceInfo.wBlockDataSize);
				if (FAIL == ret_erase)
					MARK_BLOCK_AS_BAD(pbt[rep_blk]);
			} else {
				nand_dbg_print(NAND_DBG_DEBUG,
					"FTL_Copy_Block == OK\n");
				break;
			}
		}

		if (j < RETRY_TIMES) {
			/* Copy succeeded: swap the two block-table entries;
			 * the old data block becomes a DISCARD block */
			u32 tmp;
			u32 old_idx = FTL_Get_Block_Index(*least_idx);
			u32 rep_idx = FTL_Get_Block_Index(rep_blk);
			tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
			pbt[old_idx] = (u32)((~SPARE_BLOCK) &
					pbt[rep_idx]);
			pbt[rep_idx] = tmp;
#if CMD_DMA
			/* Log both entry updates in the CDMA delta list;
			 * 0x0C marks BT_Index + BT_Entry_Value valid */
			p_BTableChangesDelta = (struct BTableChangesDelta *)
						g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = old_idx;
			p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
			p_BTableChangesDelta->ValidFields = 0x0C;

			p_BTableChangesDelta = (struct BTableChangesDelta *)
						g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = rep_idx;
			p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		} else {
			/* All retries failed: mark the replacement bad */
			pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
#if CMD_DMA
			p_BTableChangesDelta = (struct BTableChangesDelta *)
						g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index =
				FTL_Get_Block_Index(rep_blk);
			p_BTableChangesDelta->BT_Entry_Value =
				pbt[FTL_Get_Block_Index(rep_blk)];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
			*result = FAIL;
			ret = FAIL;
		}

		/* Cap the number of replacements per leveling pass */
		if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
			ret = FAIL;
	} else {
		printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
		ret = FAIL;
	}

	return ret;
}
3351
3352 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3353 * Function: FTL_Static_Wear_Leveling
3354 * Inputs: none
3355 * Outputs: PASS=0 / FAIL=1
3356 * Description: This is static wear leveling (done by explicit call)
3357 * search for most&least used
3358 * if difference < GATE:
3359 * update the block table with exhange
3360 * mark block table in flash as IN_PROGRESS
3361 * copy flash block
3362 * the caller should handle GC clean up after calling this function
3363 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3364 int FTL_Static_Wear_Leveling(void)
3365 {
3366 u8 most_worn_cnt;
3367 u8 least_worn_cnt;
3368 u32 most_worn_idx;
3369 u32 least_worn_idx;
3370 int result = PASS;
3371 int go_on = PASS;
3372 u32 replaced_blks = 0;
3373 u8 *chang_flag = flags_static_wear_leveling;
3374
3375 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3376 __FILE__, __LINE__, __func__);
3377
3378 if (!chang_flag)
3379 return FAIL;
3380
3381 memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
3382 while (go_on == PASS) {
3383 nand_dbg_print(NAND_DBG_DEBUG,
3384 "starting static wear leveling\n");
3385 most_worn_cnt = 0;
3386 least_worn_cnt = 0xFF;
3387 least_worn_idx = BLOCK_TABLE_INDEX;
3388 most_worn_idx = BLOCK_TABLE_INDEX;
3389
3390 find_least_most_worn(chang_flag, &least_worn_idx,
3391 &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
3392
3393 nand_dbg_print(NAND_DBG_DEBUG,
3394 "Used and least worn is block %u, whos count is %u\n",
3395 (unsigned int)least_worn_idx,
3396 (unsigned int)least_worn_cnt);
3397
3398 nand_dbg_print(NAND_DBG_DEBUG,
3399 "Free and most worn is block %u, whos count is %u\n",
3400 (unsigned int)most_worn_idx,
3401 (unsigned int)most_worn_cnt);
3402
3403 if ((most_worn_cnt > least_worn_cnt) &&
3404 (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
3405 go_on = move_blks_for_wear_leveling(chang_flag,
3406 &least_worn_idx, &replaced_blks, &result);
3407 else
3408 go_on = FAIL;
3409 }
3410
3411 return result;
3412 }
3413
3414 #if CMD_DMA
/* CMD_DMA version: erase discarded data blocks and convert them to
 * spares, logging each block-table change in the CDMA delta list.
 * Stops early when the CDMA command budget (256) would be exceeded.
 * Returns PASS if at least one block was reclaimed, else FAIL. */
static int do_garbage_collection(u32 discard_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 pba;
	u8 bt_block_erased = 0;
	int i, cnt, ret = FAIL;
	u64 addr;

	i = 0;
	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
			((ftl_cmd_cnt + 28) < 256)) {
		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
				(pbt[i] & DISCARD_BLOCK)) {
			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);
			pba = BLK_FROM_ADDR(addr);

			/* Blocks holding block-table copies are left to the
			 * BT garbage collector — skip them here */
			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase BT block %u\n",
						(unsigned int)pba);
					discard_cnt--;
					i++;
					bt_block_erased = 1;
					break;
				}
			}

			if (bt_block_erased) {
				bt_block_erased = 0;
				continue;
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);

			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				/* Discard -> spare; log the entry change
				 * for CDMA replay (0x0C: index+value valid) */
				pbt[i] &= (u32)(~DISCARD_BLOCK);
				pbt[i] |= (u32)(SPARE_BLOCK);
				p_BTableChangesDelta =
					(struct BTableChangesDelta *)
					g_pBTDelta_Free;
				g_pBTDelta_Free +=
					sizeof(struct BTableChangesDelta);
				p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt - 1;
				p_BTableChangesDelta->BT_Index = i;
				p_BTableChangesDelta->BT_Entry_Value = pbt[i];
				p_BTableChangesDelta->ValidFields = 0x0C;
				discard_cnt--;
				ret = PASS;
			} else {
				MARK_BLOCK_AS_BAD(pbt[i]);
			}
		}

		i++;
	}

	return ret;
}
3482
3483 #else
/* Non-CMD_DMA version: erase discarded data blocks and convert them to
 * spares.  Blocks holding block-table copies or L2-cache data are
 * skipped.  Returns PASS if at least one block was reclaimed. */
static int do_garbage_collection(u32 discard_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 pba;
	u8 bt_block_erased = 0;
	int i, cnt, ret = FAIL;
	u64 addr;

	i = 0;
	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
				(pbt[i] & DISCARD_BLOCK)) {
			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);
			pba = BLK_FROM_ADDR(addr);

			/* Blocks holding block-table copies are left to the
			 * BT garbage collector — skip them here */
			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase BT block %d\n",
						pba);
					discard_cnt--;
					i++;
					bt_block_erased = 1;
					break;
				}
			}

			if (bt_block_erased) {
				bt_block_erased = 0;
				continue;
			}

			/* If the discard block is L2 cache block, then just skip it */
			for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
				if (cache_l2.blk_array[cnt] == pba) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase L2 cache blk %d\n",
						pba);
					break;
				}
			}
			if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
				discard_cnt--;
				i++;
				continue;
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);

			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				/* Discard -> spare */
				pbt[i] &= (u32)(~DISCARD_BLOCK);
				pbt[i] |= (u32)(SPARE_BLOCK);
				discard_cnt--;
				ret = PASS;
			} else {
				MARK_BLOCK_AS_BAD(pbt[i]);
			}
		}

		i++;
	}

	return ret;
}
3555 #endif
3556
3557 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3558 * Function: GLOB_FTL_Garbage_Collection
3559 * Inputs: none
3560 * Outputs: PASS / FAIL (returns the number of un-erased blocks
3561 * Description: search the block table for all discarded blocks to erase
3562 * for each discarded block:
3563 * set the flash block to IN_PROGRESS
3564 * erase the block
3565 * update the block table
3566 * write the block table to flash
3567 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3568 int GLOB_FTL_Garbage_Collection(void)
3569 {
3570 u32 i;
3571 u32 wDiscard = 0;
3572 int wResult = FAIL;
3573 u32 *pbt = (u32 *)g_pBlockTable;
3574
3575 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3576 __FILE__, __LINE__, __func__);
3577
3578 if (GC_Called) {
3579 printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
3580 "has been re-entered! Exit.\n");
3581 return PASS;
3582 }
3583
3584 GC_Called = 1;
3585
3586 GLOB_FTL_BT_Garbage_Collection();
3587
3588 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3589 if (IS_DISCARDED_BLOCK(i))
3590 wDiscard++;
3591 }
3592
3593 if (wDiscard <= 0) {
3594 GC_Called = 0;
3595 return wResult;
3596 }
3597
3598 nand_dbg_print(NAND_DBG_DEBUG,
3599 "Found %d discarded blocks\n", wDiscard);
3600
3601 FTL_Write_Block_Table(FAIL);
3602
3603 wResult = do_garbage_collection(wDiscard);
3604
3605 FTL_Write_Block_Table(FAIL);
3606
3607 GC_Called = 0;
3608
3609 return wResult;
3610 }
3611
3612
3613 #if CMD_DMA
/* CMD_DMA version: erase discarded blocks that used to hold block-table
 * copies, walking the BT-block ring from last_erased and logging each
 * table change in the CDMA delta list.  Stops early when the CDMA
 * command budget (256) would be exceeded.  Re-entrancy is rejected via
 * BT_GC_Called.  Returns PASS if at least one block was erased.
 * NOTE(review): the first debug message below is labeled
 * "do_bt_garbage_collection" while the erase message says "_cdma" —
 * the labels look swapped with the non-CMD_DMA variant; confirm. */
static int do_bt_garbage_collection(void)
{
	u32 pba, lba;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
	u64 addr;
	int i, ret = FAIL;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (BT_GC_Called)
		return PASS;

	BT_GC_Called = 1;

	/* Walk the ring while the entry two ahead is still valid */
	for (i = last_erased; (i <= LAST_BT_ID) &&
		(g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
		FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
		((ftl_cmd_cnt + 28)) < 256; i++) {
		pba = pBTBlocksNode[i - FIRST_BT_ID];
		lba = FTL_Get_Block_Index(pba);
		nand_dbg_print(NAND_DBG_DEBUG,
			"do_bt_garbage_collection: pba %d, lba %d\n",
			pba, lba);
		nand_dbg_print(NAND_DBG_DEBUG,
			"Block Table Entry: %d", pbt[lba]);

		if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
			(pbt[lba] & DISCARD_BLOCK)) {
			nand_dbg_print(NAND_DBG_DEBUG,
				"do_bt_garbage_collection_cdma: "
				"Erasing Block tables present in block %d\n",
				pba);
			addr = FTL_Get_Physical_Block_Addr((u64)lba *
						DeviceInfo.wBlockDataSize);
			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				/* Discard -> spare; log the change for
				 * CDMA replay (0x0C: index+value valid) */
				pbt[lba] &= (u32)(~DISCARD_BLOCK);
				pbt[lba] |= (u32)(SPARE_BLOCK);

				p_BTableChangesDelta =
					(struct BTableChangesDelta *)
					g_pBTDelta_Free;
				g_pBTDelta_Free +=
					sizeof(struct BTableChangesDelta);

				p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt - 1;
				p_BTableChangesDelta->BT_Index = lba;
				p_BTableChangesDelta->BT_Entry_Value =
					pbt[lba];

				p_BTableChangesDelta->ValidFields = 0x0C;

				ret = PASS;
				pBTBlocksNode[last_erased - FIRST_BT_ID] =
					BTBLOCK_INVAL;
				nand_dbg_print(NAND_DBG_DEBUG,
					"resetting bt entry at index %d "
					"value %d\n", i,
					pBTBlocksNode[i - FIRST_BT_ID]);
				/* Advance last_erased circularly */
				if (last_erased == LAST_BT_ID)
					last_erased = FIRST_BT_ID;
				else
					last_erased++;
			} else {
				MARK_BLOCK_AS_BAD(pbt[lba]);
			}
		}
	}

	BT_GC_Called = 0;

	return ret;
}
3689
3690 #else
3691 static int do_bt_garbage_collection(void)
3692 {
3693 u32 pba, lba;
3694 u32 *pbt = (u32 *)g_pBlockTable;
3695 u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
3696 u64 addr;
3697 int i, ret = FAIL;
3698
3699 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3700 __FILE__, __LINE__, __func__);
3701
3702 if (BT_GC_Called)
3703 return PASS;
3704
3705 BT_GC_Called = 1;
3706
3707 for (i = last_erased; (i <= LAST_BT_ID) &&
3708 (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
3709 FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
3710 pba = pBTBlocksNode[i - FIRST_BT_ID];
3711 lba = FTL_Get_Block_Index(pba);
3712 nand_dbg_print(NAND_DBG_DEBUG,
3713 "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
3714 pba, lba);
3715 nand_dbg_print(NAND_DBG_DEBUG,
3716 "Block Table Entry: %d", pbt[lba]);
3717
3718 if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
3719 (pbt[lba] & DISCARD_BLOCK)) {
3720 nand_dbg_print(NAND_DBG_DEBUG,
3721 "do_bt_garbage_collection: "
3722 "Erasing Block tables present in block %d\n",
3723 pba);
3724 addr = FTL_Get_Physical_Block_Addr((u64)lba *
3725 DeviceInfo.wBlockDataSize);
3726 if (PASS == GLOB_FTL_Block_Erase(addr)) {
3727 pbt[lba] &= (u32)(~DISCARD_BLOCK);
3728 pbt[lba] |= (u32)(SPARE_BLOCK);
3729 ret = PASS;
3730 pBTBlocksNode[last_erased - FIRST_BT_ID] =
3731 BTBLOCK_INVAL;
3732 nand_dbg_print(NAND_DBG_DEBUG,
3733 "resetting bt entry at index %d "
3734 "value %d\n", i,
3735 pBTBlocksNode[i - FIRST_BT_ID]);
3736 if (last_erased == LAST_BT_ID)
3737 last_erased = FIRST_BT_ID;
3738 else
3739 last_erased++;
3740 } else {
3741 MARK_BLOCK_AS_BAD(pbt[lba]);
3742 }
3743 }
3744 }
3745
3746 BT_GC_Called = 0;
3747
3748 return ret;
3749 }
3750
3751 #endif
3752
3753 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3754 * Function: GLOB_FTL_BT_Garbage_Collection
3755 * Inputs: none
3756 * Outputs: PASS / FAIL (returns the number of un-erased blocks
3757 * Description: Erases discarded blocks containing Block table
3758 *
3759 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/* Public entry point: thin wrapper over the build-dependent (CMD_DMA or
 * not) do_bt_garbage_collection() implementation above. */
int GLOB_FTL_BT_Garbage_Collection(void)
{
	return do_bt_garbage_collection();
}
3764
3765 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3766 * Function: FTL_Replace_OneBlock
3767 * Inputs: Block number 1
3768 * Block number 2
3769 * Outputs: Replaced Block Number
3770 * Description: Interchange block table entries at wBlockNum and wReplaceNum
3771 *
3772 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/* Interchange the block-table entries at indices blk and rep_blk.
 * The replaced entry is flagged DISCARD (unless already bad) and
 * parked at rep_blk; the replacement entry (SPARE flag cleared) is
 * installed at blk.
 * Returns the new entry value at blk, or BAD_BLOCK if rep_blk is
 * BAD_BLOCK (no replacement available). */
static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
{
	u32 tmp_blk;
	u32 replace_node = BAD_BLOCK;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (rep_blk != BAD_BLOCK) {
		/* A bad block keeps its entry as-is; otherwise the old
		 * entry is retired as a DISCARD block */
		if (IS_BAD_BLOCK(blk))
			tmp_blk = pbt[blk];
		else
			tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);

		replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
		pbt[blk] = replace_node;
		pbt[rep_blk] = tmp_blk;

#if CMD_DMA
		/* Log both entry updates in the CDMA delta list;
		 * 0x0C marks BT_Index + BT_Entry_Value as valid */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[blk];

		p_BTableChangesDelta->ValidFields = 0x0C;

		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = rep_blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
		p_BTableChangesDelta->ValidFields = 0x0C;
#endif
	}

	return replace_node;
}
3816
3817 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3818 * Function: FTL_Write_Block_Table_Data
3819 * Inputs: Block table size in pages
3820 * Outputs: PASS=0 / FAIL=1
3821 * Description: Write block table data in flash
3822 * If first page and last page
3823 * Write data+BT flag
3824 * else
3825 * Write data
3826 * BT flag is a counter. Its value is incremented for block table
3827 * write in a new Block
3828 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/* Write the block-table data to flash at the current block-table
 * block/offset.  The first and last pages carry the BT signature and
 * the bt_flag counter (incremented each time the table moves to a new
 * block); middle pages carry raw table data.
 * NOTE(review): every error path jumps to func_return, which returns
 * PASS unconditionally — failures are logged but not propagated to the
 * caller.  Confirm whether that is intended. */
static int FTL_Write_Block_Table_Data(void)
{
	u64 dwBlockTableAddr, pTempAddr;
	u32 Block;
	u16 Page, PageCount;
	u8 *tempBuf = tmp_buf_write_blk_table_data;
	int wBytesCopied;
	u16 bt_pages;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
			__FILE__, __LINE__, __func__);

	/* Byte address of the current block-table location */
	dwBlockTableAddr =
		(u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
		(u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
	pTempAddr = dwBlockTableAddr;

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
			       "page= %d BlockTableIndex= %d "
			       "BlockTableOffset=%d\n", bt_pages,
			       g_wBlockTableIndex, g_wBlockTableOffset);

	Block = BLK_FROM_ADDR(pTempAddr);
	Page = PAGE_FROM_ADDR(pTempAddr, Block);
	PageCount = 1;

	if (bt_block_changed) {
		/* Table moved to a new block: bump the bt_flag counter
		 * (wrapping at LAST_BT_ID) and record the new block */
		if (bt_flag == LAST_BT_ID) {
			bt_flag = FIRST_BT_ID;
			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
		} else if (bt_flag < LAST_BT_ID) {
			bt_flag++;
			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
		}

		/* Reclaim old BT blocks when the ID space is nearly used */
		if ((bt_flag > (LAST_BT_ID-4)) &&
			g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
			BTBLOCK_INVAL) {
			bt_block_changed = 0;
			GLOB_FTL_BT_Garbage_Collection();
		}

		bt_block_changed = 0;
		nand_dbg_print(NAND_DBG_DEBUG,
			"Block Table Counter is %u Block %u\n",
			bt_flag, (unsigned int)Block);
	}

	/* First page: 4-byte header (bt_flag in byte 3), table data,
	 * 0xFF padding, and the BT signature in the spare area */
	memset(tempBuf, 0, 3);
	tempBuf[3] = bt_flag;
	wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
			DeviceInfo.wPageDataSize - 4, 0);
	memset(&tempBuf[wBytesCopied + 4], 0xff,
		DeviceInfo.wPageSize - (wBytesCopied + 4));
	FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
		bt_flag);

#if CMD_DMA
	memcpy(g_pNextBlockTable, tempBuf,
		DeviceInfo.wPageSize * sizeof(u8));
	nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
		"Block %u Page %u\n", (unsigned int)Block, Page);
	if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
		Block, Page, 1,
		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
			"%s, Line %d, Function: %s, "
			"new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, Block);
		goto func_return;
	}

	ftl_cmd_cnt++;
	g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
#else
	if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, Function: %s, "
			"new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, Block);
		goto func_return;
	}
#endif

	if (bt_pages > 1) {
		PageCount = bt_pages - 1;
		/* Middle pages: raw table data only (no signature) */
		if (PageCount > 1) {
			wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
				DeviceInfo.wPageDataSize * (PageCount - 1),
				wBytesCopied);

#if CMD_DMA
			memcpy(g_pNextBlockTable, tempBuf,
				(PageCount - 1) * DeviceInfo.wPageDataSize);
			if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
				g_pNextBlockTable, Block, Page + 1,
				PageCount - 1)) {
				nand_dbg_print(NAND_DBG_WARN,
					"NAND Program fail in %s, Line %d, "
					"Function: %s, "
					"new Bad Block %d generated!\n",
					__FILE__, __LINE__, __func__,
					(int)Block);
				goto func_return;
			}

			ftl_cmd_cnt++;
			g_pNextBlockTable += (PageCount - 1) *
				DeviceInfo.wPageDataSize * sizeof(u8);
#else
			if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
					Block, Page + 1, PageCount - 1)) {
				nand_dbg_print(NAND_DBG_WARN,
					"NAND Program fail in %s, Line %d, "
					"Function: %s, "
					"new Bad Block %d generated!\n",
					__FILE__, __LINE__, __func__,
					(int)Block);
				goto func_return;
			}
#endif
		}

		/* Last page: remaining data, 0xFF padding, signature */
		wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
				DeviceInfo.wPageDataSize, wBytesCopied);
		memset(&tempBuf[wBytesCopied], 0xff,
			DeviceInfo.wPageSize-wBytesCopied);
		FTL_Insert_Block_Table_Signature(
			&tempBuf[DeviceInfo.wPageDataSize], bt_flag);
#if CMD_DMA
		memcpy(g_pNextBlockTable, tempBuf,
			DeviceInfo.wPageSize * sizeof(u8));
		nand_dbg_print(NAND_DBG_DEBUG,
			"Writing the last Page of Block Table "
			"Block %u Page %u\n",
			(unsigned int)Block, Page + bt_pages - 1);
		if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
			g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
			LLD_CMD_FLAG_MODE_CDMA |
			LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
			nand_dbg_print(NAND_DBG_WARN,
				"NAND Program fail in %s, Line %d, "
				"Function: %s, new Bad Block %d generated!\n",
				__FILE__, __LINE__, __func__, Block);
			goto func_return;
		}
		ftl_cmd_cnt++;
#else
		if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
			Block, Page+bt_pages - 1, 1)) {
			nand_dbg_print(NAND_DBG_WARN,
				"NAND Program fail in %s, Line %d, "
				"Function: %s, "
				"new Bad Block %d generated!\n",
				__FILE__, __LINE__, __func__, Block);
			goto func_return;
		}
#endif
	}

	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");

func_return:
	return PASS;
}
3996
3997 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3998 * Function: FTL_Replace_Block_Table
3999 * Inputs: None
4000 * Outputs: PASS=0 / FAIL=1
4001 * Description: Get a new block to write block table
4002 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4003 static u32 FTL_Replace_Block_Table(void)
4004 {
4005 u32 blk;
4006 int gc;
4007
4008 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4009 __FILE__, __LINE__, __func__);
4010
4011 blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
4012
4013 if ((BAD_BLOCK == blk) && (PASS == gc)) {
4014 GLOB_FTL_Garbage_Collection();
4015 blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
4016 }
4017 if (BAD_BLOCK == blk)
4018 printk(KERN_ERR "%s, %s: There is no spare block. "
4019 "It should never happen\n",
4020 __FILE__, __func__);
4021
4022 nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
4023
4024 return blk;
4025 }
4026
4027 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4028 * Function: FTL_Replace_LWBlock
4029 * Inputs: Block number
4030 * Pointer to Garbage Collect flag
4031 * Outputs:
4032 * Description: Determine the least weared block by traversing
4033 * block table
4034 * Set Garbage collection to be called if number of spare
4035 * block is less than Free Block Gate count
4036 * Change Block table entry to map least worn block for current
4037 * operation
4038 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/* Find the least-worn spare block and install it at wBlockNum via
 * FTL_Replace_OneBlock().  If wBlockNum is itself a spare block, it is
 * simply claimed (SPARE flag cleared) and returned directly.
 * *pGarbageCollect is set to PASS when the number of discarded blocks
 * is high or the number of spares is low (NUM_FREE_BLOCKS_GATE), so
 * the caller knows garbage collection is due.
 * Returns the new entry value at wBlockNum (BAD_BLOCK if no spare). */
static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
{
	u32 i;
	u32 *pbt = (u32 *)g_pBlockTable;
	u8 wLeastWornCounter = 0xFF;
	u32 wLeastWornIndex = BAD_BLOCK;
	u32 wSpareBlockNum = 0;
	u32 wDiscardBlockNum = 0;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (IS_SPARE_BLOCK(wBlockNum)) {
		/* Already free: claim it in place, no replacement needed */
		*pGarbageCollect = FAIL;
		pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
#if CMD_DMA
		/* Log the entry change for CDMA replay */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
		p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
		p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		return pbt[wBlockNum];
	}

	/* Scan for the least-worn spare, counting spares and discards */
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if (IS_DISCARDED_BLOCK(i))
			wDiscardBlockNum++;

		if (IS_SPARE_BLOCK(i)) {
			u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
			if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
				printk(KERN_ERR "FTL_Replace_LWBlock: "
					"This should never occur!\n");
			if (g_pWearCounter[wPhysicalIndex -
				DeviceInfo.wSpectraStartBlock] <
				wLeastWornCounter) {
				wLeastWornCounter =
					g_pWearCounter[wPhysicalIndex -
					DeviceInfo.wSpectraStartBlock];
				wLeastWornIndex = i;
			}
			wSpareBlockNum++;
		}
	}

	nand_dbg_print(NAND_DBG_WARN,
		"FTL_Replace_LWBlock: Least Worn Counter %d\n",
		(int)wLeastWornCounter);

	/* Advise GC when discards pile up or spares run low */
	if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
		(wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
		*pGarbageCollect = PASS;
	else
		*pGarbageCollect = FAIL;

	nand_dbg_print(NAND_DBG_DEBUG,
		"FTL_Replace_LWBlock: Discarded Blocks %u Spare"
		" Blocks %u\n",
		(unsigned int)wDiscardBlockNum,
		(unsigned int)wSpareBlockNum);

	return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
}
4106
4107 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4108 * Function: FTL_Replace_MWBlock
4109 * Inputs: None
4110 * Outputs: most worn spare block no./BAD_BLOCK
4111 * Description: It finds most worn spare block.
4112 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4113 static u32 FTL_Replace_MWBlock(void)
4114 {
4115 u32 i;
4116 u32 *pbt = (u32 *)g_pBlockTable;
4117 u8 wMostWornCounter = 0;
4118 u32 wMostWornIndex = BAD_BLOCK;
4119 u32 wSpareBlockNum = 0;
4120
4121 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4122 __FILE__, __LINE__, __func__);
4123
4124 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
4125 if (IS_SPARE_BLOCK(i)) {
4126 u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
4127 if (g_pWearCounter[wPhysicalIndex -
4128 DeviceInfo.wSpectraStartBlock] >
4129 wMostWornCounter) {
4130 wMostWornCounter =
4131 g_pWearCounter[wPhysicalIndex -
4132 DeviceInfo.wSpectraStartBlock];
4133 wMostWornIndex = wPhysicalIndex;
4134 }
4135 wSpareBlockNum++;
4136 }
4137 }
4138
4139 if (wSpareBlockNum <= 2)
4140 return BAD_BLOCK;
4141
4142 return wMostWornIndex;
4143 }
4144
4145 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4146 * Function: FTL_Replace_Block
4147 * Inputs: Block Address
4148 * Outputs: PASS=0 / FAIL=1
4149 * Description: If block specified by blk_addr parameter is not free,
4150 * replace it with the least worn block.
4151 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4152 static int FTL_Replace_Block(u64 blk_addr)
4153 {
4154 u32 current_blk = BLK_FROM_ADDR(blk_addr);
4155 u32 *pbt = (u32 *)g_pBlockTable;
4156 int wResult = PASS;
4157 int GarbageCollect = FAIL;
4158
4159 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4160 __FILE__, __LINE__, __func__);
4161
4162 if (IS_SPARE_BLOCK(current_blk)) {
4163 pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
4164 #if CMD_DMA
4165 p_BTableChangesDelta =
4166 (struct BTableChangesDelta *)g_pBTDelta_Free;
4167 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4168 p_BTableChangesDelta->ftl_cmd_cnt =
4169 ftl_cmd_cnt;
4170 p_BTableChangesDelta->BT_Index = current_blk;
4171 p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
4172 p_BTableChangesDelta->ValidFields = 0x0C ;
4173 #endif
4174 return wResult;
4175 }
4176
4177 FTL_Replace_LWBlock(current_blk, &GarbageCollect);
4178
4179 if (PASS == GarbageCollect)
4180 wResult = GLOB_FTL_Garbage_Collection();
4181
4182 return wResult;
4183 }
4184
4185 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4186 * Function: GLOB_FTL_Is_BadBlock
4187 * Inputs: block number to test
4188 * Outputs: PASS (block is BAD) / FAIL (block is not bad)
4189 * Description: test if this block number is flagged as bad
4190 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4191 int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
4192 {
4193 u32 *pbt = (u32 *)g_pBlockTable;
4194
4195 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4196 __FILE__, __LINE__, __func__);
4197
4198 if (wBlockNum >= DeviceInfo.wSpectraStartBlock
4199 && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
4200 return PASS;
4201 else
4202 return FAIL;
4203 }
4204
4205 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4206 * Function: GLOB_FTL_Flush_Cache
4207 * Inputs: none
4208 * Outputs: PASS=0 / FAIL=1
4209 * Description: flush all the cache blocks to flash
4210 * if a cache block is not dirty, don't do anything with it
4211 * else, write the block and update the block table
4212 * Note: This function should be called at shutdown/power down.
4213 * to write important data into device
4214 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4215 int GLOB_FTL_Flush_Cache(void)
4216 {
4217 int i, ret;
4218
4219 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
4220 __FILE__, __LINE__, __func__);
4221
4222 for (i = 0; i < CACHE_ITEM_NUM; i++) {
4223 if (SET == Cache.array[i].changed) {
4224 #if CMD_DMA
4225 #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
4226 int_cache[ftl_cmd_cnt].item = i;
4227 int_cache[ftl_cmd_cnt].cache.address =
4228 Cache.array[i].address;
4229 int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
4230 #endif
4231 #endif
4232 ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
4233 if (PASS == ret) {
4234 Cache.array[i].changed = CLEAR;
4235 } else {
4236 printk(KERN_ALERT "Failed when write back to L2 cache!\n");
4237 /* TODO - How to handle this? */
4238 }
4239 }
4240 }
4241
4242 flush_l2_cache();
4243
4244 return FTL_Write_Block_Table(FAIL);
4245 }
4246
4247 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4248 * Function: GLOB_FTL_Page_Read
4249 * Inputs: pointer to data
4250 * logical address of data (u64 is LBA * Bytes/Page)
4251 * Outputs: PASS=0 / FAIL=1
4252 * Description: reads a page of data into RAM from the cache
4253 * if the data is not already in cache, read from flash to cache
4254 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4255 int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
4256 {
4257 u16 cache_item;
4258 int res = PASS;
4259
4260 nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
4261 "page_addr: %llu\n", logical_addr);
4262
4263 cache_item = FTL_Cache_If_Hit(logical_addr);
4264
4265 if (UNHIT_CACHE_ITEM == cache_item) {
4266 nand_dbg_print(NAND_DBG_DEBUG,
4267 "GLOB_FTL_Page_Read: Cache not hit\n");
4268 res = FTL_Cache_Write();
4269 if (ERR == FTL_Cache_Read(logical_addr))
4270 res = ERR;
4271 cache_item = Cache.LRU;
4272 }
4273
4274 FTL_Cache_Read_Page(data, logical_addr, cache_item);
4275
4276 return res;
4277 }
4278
4279 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4280 * Function: GLOB_FTL_Page_Write
4281 * Inputs: pointer to data
4282 * address of data (ADDRESSTYPE is LBA * Bytes/Page)
4283 * Outputs: PASS=0 / FAIL=1
4284 * Description: writes a page of data from RAM to the cache
4285 * if the data is not already in cache, write back the
4286 * least recently used block and read the addressed block
4287 * from flash to cache
4288 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4289 int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
4290 {
4291 u16 cache_blk;
4292 u32 *pbt = (u32 *)g_pBlockTable;
4293 int wResult = PASS;
4294
4295 nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
4296 "dwPageAddr: %llu\n", dwPageAddr);
4297
4298 cache_blk = FTL_Cache_If_Hit(dwPageAddr);
4299
4300 if (UNHIT_CACHE_ITEM == cache_blk) {
4301 wResult = FTL_Cache_Write();
4302 if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
4303 wResult = FTL_Replace_Block(dwPageAddr);
4304 pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
4305 if (wResult == FAIL)
4306 return FAIL;
4307 }
4308 if (ERR == FTL_Cache_Read(dwPageAddr))
4309 wResult = ERR;
4310 cache_blk = Cache.LRU;
4311 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
4312 } else {
4313 #if CMD_DMA
4314 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
4315 LLD_CMD_FLAG_ORDER_BEFORE_REST);
4316 #else
4317 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
4318 #endif
4319 }
4320
4321 return wResult;
4322 }
4323
4324 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4325 * Function: GLOB_FTL_Block_Erase
4326 * Inputs: address of block to erase (now in byte format, should change to
4327 * block format)
4328 * Outputs: PASS=0 / FAIL=1
4329 * Description: erases the specified block
4330 * increments the erase count
4331 * If erase count reaches its upper limit,call function to
4332 * do the ajustment as per the relative erase count values
4333 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Erase the physical block containing blk_addr (byte address) and update
 * the wear/read counters. On MLC devices the erase also resets the block's
 * read-disturb counter and triggers an in-progress block-table page write.
 * When the wear counter nears saturation (0xFE) all counters are rebased.
 * Returns PASS / FAIL from the low-level erase.
 */
int GLOB_FTL_Block_Erase(u64 blk_addr)
{
	int status;
	u32 BlkIdx;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Byte address -> physical block index */
	BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);

	if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
		/* Caller passed an address below the Spectra-managed range */
		printk(KERN_ERR "GLOB_FTL_Block_Erase: "
			"This should never occur\n");
		return FAIL;
	}

#if CMD_DMA
	/* CDMA path: queue the erase; failure is only logged here */
	status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
	if (status == FAIL)
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, BlkIdx);
#else
	status = GLOB_LLD_Erase_Block(BlkIdx);
	if (status == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, BlkIdx);
		return status;
	}
#endif

	if (DeviceInfo.MLCDevice) {
		/* An erase clears accumulated read disturbance */
		g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}

	g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;

#if CMD_DMA
	/* Journal the wear-counter update (ValidFields 0x30 = WC fields) */
	p_BTableChangesDelta =
		(struct BTableChangesDelta *)g_pBTDelta_Free;
	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
	p_BTableChangesDelta->WC_Index =
		BlkIdx - DeviceInfo.wSpectraStartBlock;
	p_BTableChangesDelta->WC_Entry_Value =
		g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
	p_BTableChangesDelta->ValidFields = 0x30;

	if (DeviceInfo.MLCDevice) {
		/* Journal the read-counter reset (ValidFields 0xC0 = RC) */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->RC_Index =
			BlkIdx - DeviceInfo.wSpectraStartBlock;
		p_BTableChangesDelta->RC_Entry_Value =
			g_pReadCounter[BlkIdx -
				DeviceInfo.wSpectraStartBlock];
		p_BTableChangesDelta->ValidFields = 0xC0;
	}

	ftl_cmd_cnt++;
#endif

	/* Counter nearly saturated: rebase all wear counters */
	if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
		FTL_Adjust_Relative_Erase_Count(BlkIdx);

	return status;
}
4411
4412
4413 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4414 * Function: FTL_Adjust_Relative_Erase_Count
4415 * Inputs: index to block that was just incremented and is at the max
4416 * Outputs: PASS=0 / FAIL=1
4417 * Description: If any erase counts at MAX, adjusts erase count of every
4418 * block by substracting least worn
4419 * counter from counter value of every entry in wear table
4420 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4421 static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
4422 {
4423 u8 wLeastWornCounter = MAX_BYTE_VALUE;
4424 u8 wWearCounter;
4425 u32 i, wWearIndex;
4426 u32 *pbt = (u32 *)g_pBlockTable;
4427 int wResult = PASS;
4428
4429 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4430 __FILE__, __LINE__, __func__);
4431
4432 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
4433 if (IS_BAD_BLOCK(i))
4434 continue;
4435 wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4436
4437 if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0)
4438 printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
4439 "This should never occur\n");
4440 wWearCounter = g_pWearCounter[wWearIndex -
4441 DeviceInfo.wSpectraStartBlock];
4442 if (wWearCounter < wLeastWornCounter)
4443 wLeastWornCounter = wWearCounter;
4444 }
4445
4446 if (wLeastWornCounter == 0) {
4447 nand_dbg_print(NAND_DBG_WARN,
4448 "Adjusting Wear Levelling Counters: Special Case\n");
4449 g_pWearCounter[Index_of_MAX -
4450 DeviceInfo.wSpectraStartBlock]--;
4451 #if CMD_DMA
4452 p_BTableChangesDelta =
4453 (struct BTableChangesDelta *)g_pBTDelta_Free;
4454 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4455 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
4456 p_BTableChangesDelta->WC_Index =
4457 Index_of_MAX - DeviceInfo.wSpectraStartBlock;
4458 p_BTableChangesDelta->WC_Entry_Value =
4459 g_pWearCounter[Index_of_MAX -
4460 DeviceInfo.wSpectraStartBlock];
4461 p_BTableChangesDelta->ValidFields = 0x30;
4462 #endif
4463 FTL_Static_Wear_Leveling();
4464 } else {
4465 for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
4466 if (!IS_BAD_BLOCK(i)) {
4467 wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4468 g_pWearCounter[wWearIndex -
4469 DeviceInfo.wSpectraStartBlock] =
4470 (u8)(g_pWearCounter
4471 [wWearIndex -
4472 DeviceInfo.wSpectraStartBlock] -
4473 wLeastWornCounter);
4474 #if CMD_DMA
4475 p_BTableChangesDelta =
4476 (struct BTableChangesDelta *)g_pBTDelta_Free;
4477 g_pBTDelta_Free +=
4478 sizeof(struct BTableChangesDelta);
4479
4480 p_BTableChangesDelta->ftl_cmd_cnt =
4481 ftl_cmd_cnt;
4482 p_BTableChangesDelta->WC_Index = wWearIndex -
4483 DeviceInfo.wSpectraStartBlock;
4484 p_BTableChangesDelta->WC_Entry_Value =
4485 g_pWearCounter[wWearIndex -
4486 DeviceInfo.wSpectraStartBlock];
4487 p_BTableChangesDelta->ValidFields = 0x30;
4488 #endif
4489 }
4490 }
4491
4492 return wResult;
4493 }
4494
4495 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4496 * Function: FTL_Write_IN_Progress_Block_Table_Page
4497 * Inputs: None
4498 * Outputs: None
4499 * Description: It writes in-progress flag page to the page next to
4500 * block table
4501 ***********************************************************************/
/*
 * Write the "in progress" flag page (IPF) to the flash page immediately
 * after the on-flash block table, marking the table as being updated.
 * In the non-CDMA path a failed write marks the block-table block bad,
 * moves the table to a replacement block, and returns FAIL (or ERR if no
 * replacement block is available). Returns PASS on success.
 */
static int FTL_Write_IN_Progress_Block_Table_Page(void)
{
	int wResult = PASS;
	u16 bt_pages;
	u16 dwIPFPageAddr;
#if CMD_DMA
#else
	/* Only the non-CDMA error path needs block-table access */
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 wTempBlockTableIndex;
#endif

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	/* IPF lives in the page right after the block-table pages */
	dwIPFPageAddr = g_wBlockTableOffset + bt_pages;

	nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
		"Block %d Page %d\n",
		g_wBlockTableIndex, dwIPFPageAddr);

#if CMD_DMA
	wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
		g_wBlockTableIndex, dwIPFPageAddr, 1,
		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
	if (wResult == FAIL) {
		/* CDMA path only logs; recovery happens on chain replay */
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__,
			g_wBlockTableIndex);
	}
	g_wBlockTableOffset = dwIPFPageAddr + 1;
	/* Journal the new offset (ValidFields 0x01 = offset only) */
	p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
	p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
	p_BTableChangesDelta->ValidFields = 0x01;
	ftl_cmd_cnt++;
#else
	wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
		g_wBlockTableIndex, dwIPFPageAddr, 1);
	if (wResult == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__,
			(int)g_wBlockTableIndex);
		/* Retire the failed block-table block and relocate */
		MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
		wTempBlockTableIndex = FTL_Replace_Block_Table();
		bt_block_changed = 1;
		if (BAD_BLOCK == wTempBlockTableIndex)
			return ERR; /* no replacement block available */
		g_wBlockTableIndex = wTempBlockTableIndex;
		g_wBlockTableOffset = 0;
		/* Block table tag is '00'. Means it's used one */
		pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
		return FAIL;
	}
	g_wBlockTableOffset = dwIPFPageAddr + 1;
#endif
	return wResult;
}
4566
4567 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4568 * Function: FTL_Read_Disturbance
4569 * Inputs: block address
4570 * Outputs: PASS=0 / FAIL=1
4571 * Description: used to handle read disturbance. Data in block that
4572 * reaches its read limit is moved to new block
4573 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4574 int FTL_Read_Disturbance(u32 blk_addr)
4575 {
4576 int wResult = FAIL;
4577 u32 *pbt = (u32 *) g_pBlockTable;
4578 u32 dwOldBlockAddr = blk_addr;
4579 u32 wBlockNum;
4580 u32 i;
4581 u32 wLeastReadCounter = 0xFFFF;
4582 u32 wLeastReadIndex = BAD_BLOCK;
4583 u32 wSpareBlockNum = 0;
4584 u32 wTempNode;
4585 u32 wReplacedNode;
4586 u8 *g_pTempBuf;
4587
4588 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
4589 __FILE__, __LINE__, __func__);
4590
4591 #if CMD_DMA
4592 g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
4593 cp_back_buf_idx++;
4594 if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
4595 printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
4596 "Maybe too many pending commands in your CDMA chain.\n");
4597 return FAIL;
4598 }
4599 #else
4600 g_pTempBuf = tmp_buf_read_disturbance;
4601 #endif
4602
4603 wBlockNum = FTL_Get_Block_Index(blk_addr);
4604
4605 do {
4606 /* This is a bug.Here 'i' should be logical block number
4607 * and start from 1 (0 is reserved for block table).
4608 * Have fixed it. - Yunpeng 2008. 12. 19
4609 */
4610 for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
4611 if (IS_SPARE_BLOCK(i)) {
4612 u32 wPhysicalIndex =
4613 (u32)((~SPARE_BLOCK) & pbt[i]);
4614 if (g_pReadCounter[wPhysicalIndex -
4615 DeviceInfo.wSpectraStartBlock] <
4616 wLeastReadCounter) {
4617 wLeastReadCounter =
4618 g_pReadCounter[wPhysicalIndex -
4619 DeviceInfo.wSpectraStartBlock];
4620 wLeastReadIndex = i;
4621 }
4622 wSpareBlockNum++;
4623 }
4624 }
4625
4626 if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
4627 wResult = GLOB_FTL_Garbage_Collection();
4628 if (PASS == wResult)
4629 continue;
4630 else
4631 break;
4632 } else {
4633 wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
4634 wReplacedNode = (u32)((~SPARE_BLOCK) &
4635 pbt[wLeastReadIndex]);
4636 #if CMD_DMA
4637 pbt[wBlockNum] = wReplacedNode;
4638 pbt[wLeastReadIndex] = wTempNode;
4639 p_BTableChangesDelta =
4640 (struct BTableChangesDelta *)g_pBTDelta_Free;
4641 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4642
4643 p_BTableChangesDelta->ftl_cmd_cnt =
4644 ftl_cmd_cnt;
4645 p_BTableChangesDelta->BT_Index = wBlockNum;
4646 p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
4647 p_BTableChangesDelta->ValidFields = 0x0C;
4648
4649 p_BTableChangesDelta =
4650 (struct BTableChangesDelta *)g_pBTDelta_Free;
4651 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4652
4653 p_BTableChangesDelta->ftl_cmd_cnt =
4654 ftl_cmd_cnt;
4655 p_BTableChangesDelta->BT_Index = wLeastReadIndex;
4656 p_BTableChangesDelta->BT_Entry_Value =
4657 pbt[wLeastReadIndex];
4658 p_BTableChangesDelta->ValidFields = 0x0C;
4659
4660 wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
4661 dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
4662 LLD_CMD_FLAG_MODE_CDMA);
4663 if (wResult == FAIL)
4664 return wResult;
4665
4666 ftl_cmd_cnt++;
4667
4668 if (wResult != FAIL) {
4669 if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
4670 g_pTempBuf, pbt[wBlockNum], 0,
4671 DeviceInfo.wPagesPerBlock)) {
4672 nand_dbg_print(NAND_DBG_WARN,
4673 "NAND Program fail in "
4674 "%s, Line %d, Function: %s, "
4675 "new Bad Block %d "
4676 "generated!\n",
4677 __FILE__, __LINE__, __func__,
4678 (int)pbt[wBlockNum]);
4679 wResult = FAIL;
4680 MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
4681 }
4682 ftl_cmd_cnt++;
4683 }
4684 #else
4685 wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
4686 dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
4687 if (wResult == FAIL)
4688 return wResult;
4689
4690 if (wResult != FAIL) {
4691 /* This is a bug. At this time, pbt[wBlockNum]
4692 is still the physical address of
4693 discard block, and should not be write.
4694 Have fixed it as below.
4695 -- Yunpeng 2008.12.19
4696 */
4697 wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
4698 wReplacedNode, 0,
4699 DeviceInfo.wPagesPerBlock);
4700 if (wResult == FAIL) {
4701 nand_dbg_print(NAND_DBG_WARN,
4702 "NAND Program fail in "
4703 "%s, Line %d, Function: %s, "
4704 "new Bad Block %d "
4705 "generated!\n",
4706 __FILE__, __LINE__, __func__,
4707 (int)wReplacedNode);
4708 MARK_BLOCK_AS_BAD(wReplacedNode);
4709 } else {
4710 pbt[wBlockNum] = wReplacedNode;
4711 pbt[wLeastReadIndex] = wTempNode;
4712 }
4713 }
4714
4715 if ((wResult == PASS) && (g_cBlockTableStatus !=
4716 IN_PROGRESS_BLOCK_TABLE)) {
4717 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
4718 FTL_Write_IN_Progress_Block_Table_Page();
4719 }
4720 #endif
4721 }
4722 } while (wResult != PASS)
4723 ;
4724
4725 #if CMD_DMA
4726 /* ... */
4727 #endif
4728
4729 return wResult;
4730 }
4731