2 #include <linux/module.h>
3 #include <linux/slab.h>
9 u8 bad_count
; // bad block count in pool
10 u8 mapped_count
; // mapped block count in pool
17 phys_bmt_header header
;
18 bmt_entry table
[MAX_BMT_SIZE
];
/* On-flash BMT signatures: MAIN_SIGNATURE is stored in the page data
 * area, OOB_SIGNATURE in the spare (OOB) area. */
static char MAIN_SIGNATURE[] = "BMT";
static char OOB_SIGNATURE[] = "bmt";
#define SIGNATURE_SIZE (3)

/* Upper bounds for the scratch page/OOB buffers below. */
#define MAX_DAT_SIZE 0x4000
#define MAX_OOB_SIZE 0x800

static struct mtd_info *mtd_bmt;
static struct nand_chip *nand_chip_bmt;

/* Geometry helpers derived from the attached chip.  These evaluate
 * nand_chip_bmt at use time, so they are only valid after init_bmt(). */
#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
#define PAGE_PER_SIZE_BMT (1 << (nand_chip_bmt->phys_erase_shift - nand_chip_bmt->page_shift))

/* Byte offset / first-page address of a block.  NOTE: the macro argument
 * is parenthesized because callers pass expressions such as "block + i". */
#define OFFSET(block) (((u64)(block)) * BLOCK_SIZE_BMT)
#define PAGE_ADDR(block) ((block) * PAGE_PER_SIZE_BMT)
/*********************************************************************
 * Flash is split into 2 parts: the system part is for normal        *
 * system usage (size is system_block_count); the other part is the  *
 * replace pool (size is bmt_block_count).                           *
 * +-------------------------------------------------+               *
 * | system_block_count | bmt_block_count |                          *
 * +-------------------------------------------------+               *
 *********************************************************************/
49 static u32 total_block_count
; // block number in flash
50 static u32 system_block_count
;
51 static int bmt_block_count
; // bmt table size
52 // static int bmt_count; // block used in bmt
53 static int page_per_block
; // page per count
55 static u32 bmt_block_index
; // bmt block index
56 static bmt_struct bmt
; // dynamic created global bmt table
58 static u8 dat_buf
[MAX_DAT_SIZE
];
59 static u8 oob_buf
[MAX_OOB_SIZE
];
60 static bool pool_erased
;
62 /***************************************************************
64 * Interface adaptor for preloader/uboot/kernel
65 * These interfaces operate on physical address, read/write
68 ***************************************************************/
69 int nand_read_page_bmt(u32 page
, u8
* dat
, u8
* oob
)
71 return mtk_nand_exec_read_page(mtd_bmt
, page
, PAGE_SIZE_BMT
, dat
, oob
);
74 bool nand_block_bad_bmt(u64 offset
)
76 return mtk_nand_block_bad_hw(mtd_bmt
, offset
);
79 bool nand_erase_bmt(u64 offset
)
84 MSG(INIT
, "erase offset: 0x%llx\n", offset
);
87 status
= mtk_nand_erase_hw(mtd_bmt
, (u32
)(offset
>> nand_chip_bmt
->page_shift
)); // as nand_chip structure doesn't have a erase function defined
88 if (status
& NAND_STATUS_FAIL
)
94 int mark_block_bad_bmt(u64 offset
)
96 return mtk_nand_block_markbad_hw(mtd_bmt
, offset
); //mark_block_bad_hw(offset);
99 bool nand_write_page_bmt(u32 page
, u8
* dat
, u8
* oob
)
101 //printk("[xiaolei] nand_write_page_bmt 0x%x\n", (u32)dat);
102 if (mtk_nand_exec_write_page(mtd_bmt
, page
, PAGE_SIZE_BMT
, dat
, oob
))
108 /***************************************************************
110 * static internal function *
112 ***************************************************************/
113 static void dump_bmt_info(bmt_struct
* bmt
)
117 MSG(INIT
, "BMT v%d. total %d mapping:\n", bmt
->version
, bmt
->mapped_count
);
118 for (i
= 0; i
< bmt
->mapped_count
; i
++)
120 MSG(INIT
, "\t0x%x -> 0x%x\n", bmt
->table
[i
].bad_index
, bmt
->table
[i
].mapped_index
);
124 static bool match_bmt_signature(u8
* dat
, u8
* oob
)
127 if (memcmp(dat
+ MAIN_SIGNATURE_OFFSET
, MAIN_SIGNATURE
, SIGNATURE_SIZE
))
132 if (memcmp(oob
+ OOB_SIGNATURE_OFFSET
, OOB_SIGNATURE
, SIGNATURE_SIZE
))
134 MSG(INIT
, "main signature match, oob signature doesn't match, but ignore\n");
139 static u8
cal_bmt_checksum(phys_bmt_struct
* phys_table
, int bmt_size
)
143 u8
*dat
= (u8
*) phys_table
;
145 checksum
+= phys_table
->header
.version
;
146 checksum
+= phys_table
->header
.mapped_count
;
148 dat
+= sizeof(phys_bmt_header
);
149 for (i
= 0; i
< bmt_size
* sizeof(bmt_entry
); i
++)
158 static int is_block_mapped(int index
)
161 for (i
= 0; i
< bmt
.mapped_count
; i
++)
163 if (index
== bmt
.table
[i
].mapped_index
)
169 static bool is_page_used(u8
* dat
, u8
* oob
)
171 if (2048 == PAGE_SIZE_BMT
)
173 return ((oob
[13] != 0xFF) || (oob
[14] != 0xFF));
176 return ((oob
[OOB_INDEX_OFFSET
] != 0xFF) || (oob
[OOB_INDEX_OFFSET
+ 1] != 0xFF));
180 static bool valid_bmt_data(phys_bmt_struct
* phys_table
)
183 u8 checksum
= cal_bmt_checksum(phys_table
, bmt_block_count
);
186 if (phys_table
->header
.checksum
!= checksum
)
188 MSG(INIT
, "BMT Data checksum error: %x %x\n", phys_table
->header
.checksum
, checksum
);
192 MSG(INIT
, "BMT Checksum is: 0x%x\n", phys_table
->header
.checksum
);
194 // block index correct?
195 for (i
= 0; i
< phys_table
->header
.mapped_count
; i
++)
197 if (phys_table
->table
[i
].bad_index
>= total_block_count
|| phys_table
->table
[i
].mapped_index
>= total_block_count
|| phys_table
->table
[i
].mapped_index
< system_block_count
)
199 MSG(INIT
, "index error: bad_index: %d, mapped_index: %d\n", phys_table
->table
[i
].bad_index
, phys_table
->table
[i
].mapped_index
);
204 // pass check, valid bmt.
205 MSG(INIT
, "Valid BMT, version v%d\n", phys_table
->header
.version
);
209 static void fill_nand_bmt_buffer(bmt_struct
* bmt
, u8
* dat
, u8
* oob
)
211 phys_bmt_struct
*phys_bmt
= NULL
;
213 phys_bmt
= (phys_bmt_struct
*)kmalloc(sizeof(phys_bmt_struct
),GFP_KERNEL
);
217 printk("[fill_nand_bmt_buffer]kmalloc phys_bmt_struct fail!\n");
223 // fill phys_bmt_struct structure with bmt_struct
224 memset(phys_bmt
, 0xFF, sizeof(phys_bmt_struct
));
226 memcpy(phys_bmt
->header
.signature
, MAIN_SIGNATURE
, SIGNATURE_SIZE
);
227 phys_bmt
->header
.version
= BMT_VERSION
;
228 // phys_bmt.header.bad_count = bmt->bad_count;
229 phys_bmt
->header
.mapped_count
= bmt
->mapped_count
;
230 memcpy(phys_bmt
->table
, bmt
->table
, sizeof(bmt_entry
) * bmt_block_count
);
232 phys_bmt
->header
.checksum
= cal_bmt_checksum(phys_bmt
, bmt_block_count
);
234 memcpy(dat
+ MAIN_SIGNATURE_OFFSET
, phys_bmt
, sizeof(phys_bmt_struct
));
235 memcpy(oob
+ OOB_SIGNATURE_OFFSET
, OOB_SIGNATURE
, SIGNATURE_SIZE
);
239 // return valid index if found BMT, else return 0
240 static int load_bmt_data(int start
, int pool_size
)
242 int bmt_index
= start
+ pool_size
- 1; // find from the end
243 phys_bmt_struct
*phys_table
= NULL
;
245 phys_table
= (phys_bmt_struct
*)kmalloc(sizeof(phys_bmt_struct
),GFP_KERNEL
);
249 printk("[load_bmt_data]kmalloc phys_bmt_struct fail!\n");
252 MSG(INIT
, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__
, bmt_index
);
254 for (bmt_index
= start
+ pool_size
- 1; bmt_index
>= start
; bmt_index
--)
256 if (nand_block_bad_bmt(OFFSET(bmt_index
)))
258 MSG(INIT
, "Skip bad block: %d\n", bmt_index
);
262 if (!nand_read_page_bmt(PAGE_ADDR(bmt_index
), dat_buf
, oob_buf
))
264 MSG(INIT
, "Error found when read block %d\n", bmt_index
);
268 if (!match_bmt_signature(dat_buf
, oob_buf
))
273 MSG(INIT
, "Match bmt signature @ block: 0x%x\n", bmt_index
);
275 memcpy(phys_table
, dat_buf
+ MAIN_SIGNATURE_OFFSET
, sizeof(phys_bmt_struct
));
277 if (!valid_bmt_data(phys_table
))
279 MSG(INIT
, "BMT data is not correct %d\n", bmt_index
);
283 bmt
.mapped_count
= phys_table
->header
.mapped_count
;
284 bmt
.version
= phys_table
->header
.version
;
285 // bmt.bad_count = phys_table.header.bad_count;
286 memcpy(bmt
.table
, phys_table
->table
, bmt
.mapped_count
* sizeof(bmt_entry
));
288 MSG(INIT
, "bmt found at block: %d, mapped block: %d\n", bmt_index
, bmt
.mapped_count
);
290 for (i
= 0; i
< bmt
.mapped_count
; i
++)
292 if (!nand_block_bad_bmt(OFFSET(bmt
.table
[i
].bad_index
)))
294 MSG(INIT
, "block 0x%x is not mark bad, should be power lost last time\n", bmt
.table
[i
].bad_index
);
295 mark_block_bad_bmt(OFFSET(bmt
.table
[i
].bad_index
));
303 MSG(INIT
, "bmt block not found!\n");
308 /*************************************************************************
309 * Find an available block and erase. *
310 * start_from_end: if true, find available block from end of flash. *
311 * else, find from the beginning of the pool *
312 * need_erase: if true, all unmapped blocks in the pool will be erased *
313 *************************************************************************/
314 static int find_available_block(bool start_from_end
)
317 int block
= system_block_count
;
319 // int avail_index = 0;
320 MSG(INIT
, "Try to find_available_block, pool_erase: %d\n", pool_erased
);
322 // erase all un-mapped blocks in pool when finding avaliable block
325 MSG(INIT
, "Erase all un-mapped blocks in pool\n");
326 for (i
= 0; i
< bmt_block_count
; i
++)
328 if (block
== bmt_block_index
)
330 MSG(INIT
, "Skip bmt block 0x%x\n", block
);
334 if (nand_block_bad_bmt(OFFSET(block
+ i
)))
336 MSG(INIT
, "Skip bad block 0x%x\n", block
+ i
);
344 if (is_block_mapped(block
+ i
) >= 0)
346 MSG(INIT
, "Skip mapped block 0x%x\n", block
+ i
);
350 if (!nand_erase_bmt(OFFSET(block
+ i
)))
352 MSG(INIT
, "Erase block 0x%x failed\n", block
+ i
);
353 mark_block_bad_bmt(OFFSET(block
+ i
));
362 block
= total_block_count
- 1;
366 block
= system_block_count
;
370 for (i
= 0; i
< bmt_block_count
; i
++, block
+= direction
)
372 if (block
== bmt_block_index
)
374 MSG(INIT
, "Skip bmt block 0x%x\n", block
);
378 if (nand_block_bad_bmt(OFFSET(block
)))
380 MSG(INIT
, "Skip bad block 0x%x\n", block
);
384 if (is_block_mapped(block
) >= 0)
386 MSG(INIT
, "Skip mapped block 0x%x\n", block
);
390 MSG(INIT
, "Find block 0x%x available\n", block
);
397 static unsigned short get_bad_index_from_oob(u8
* oob_buf
)
399 unsigned short index
;
400 if (2048 == PAGE_SIZE_BMT
) // sector 1024 FDM size = 16, mark location moved
402 memcpy(&index
, oob_buf
+ 13, OOB_INDEX_SIZE
);
405 memcpy(&index
, oob_buf
+ OOB_INDEX_OFFSET
, OOB_INDEX_SIZE
);
410 void set_bad_index_to_oob(u8
* oob
, u16 index
)
412 if (2048 == PAGE_SIZE_BMT
)
414 memcpy(oob
+ 13, &index
, sizeof(index
));
417 memcpy(oob
+ OOB_INDEX_OFFSET
, &index
, sizeof(index
));
421 static int migrate_from_bad(u64 offset
, u8
* write_dat
, u8
* write_oob
)
424 u32 error_block
= (u32
)(offset
>> nand_chip_bmt
->phys_erase_shift
);
425 u32 error_page
= (u32
)(offset
>> nand_chip_bmt
->page_shift
) % page_per_block
;
428 memcpy(oob_buf
, write_oob
, MAX_OOB_SIZE
);
430 to_index
= find_available_block(false);
434 MSG(INIT
, "Cannot find an available block for BMT\n");
438 { // migrate error page first
439 MSG(INIT
, "Write error page: 0x%x\n", error_page
);
442 nand_read_page_bmt(PAGE_ADDR(error_block
) + error_page
, dat_buf
, NULL
);
445 // memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
447 if (error_block
< system_block_count
)
448 set_bad_index_to_oob(oob_buf
, error_block
); // if error_block is already a mapped block, original mapping index is in OOB.
450 if (!nand_write_page_bmt(PAGE_ADDR(to_index
) + error_page
, write_dat
, oob_buf
))
452 MSG(INIT
, "Write to page 0x%x fail\n", PAGE_ADDR(to_index
) + error_page
);
453 mark_block_bad_bmt(OFFSET(to_index
));
454 return migrate_from_bad(offset
, write_dat
, write_oob
);
458 for (page
= 0; page
< page_per_block
; page
++)
460 if (page
!= error_page
)
462 nand_read_page_bmt(PAGE_ADDR(error_block
) + page
, dat_buf
, oob_buf
);
463 if (is_page_used(dat_buf
, oob_buf
))
465 if (error_block
< system_block_count
)
467 set_bad_index_to_oob(oob_buf
, error_block
);
469 MSG(INIT
, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block
) + page
, PAGE_ADDR(to_index
) + page
);
470 if (!nand_write_page_bmt(PAGE_ADDR(to_index
) + page
, dat_buf
, oob_buf
))
472 MSG(INIT
, "Write to page 0x%x fail\n", PAGE_ADDR(to_index
) + page
);
473 mark_block_bad_bmt(OFFSET(to_index
));
474 return migrate_from_bad(offset
, write_dat
, write_oob
);
480 MSG(INIT
, "Migrate from 0x%x to 0x%x done!\n", error_block
, to_index
);
485 static bool write_bmt_to_flash(u8
* dat
, u8
* oob
)
487 bool need_erase
= true;
488 MSG(INIT
, "Try to write BMT\n");
490 if (bmt_block_index
== 0)
492 // if we don't have index, we don't need to erase found block as it has been erased in find_available_block()
494 if (!(bmt_block_index
= find_available_block(true)))
496 MSG(INIT
, "Cannot find an available block for BMT\n");
501 MSG(INIT
, "Find BMT block: 0x%x\n", bmt_block_index
);
503 // write bmt to flash
506 if (!nand_erase_bmt(OFFSET(bmt_block_index
)))
508 MSG(INIT
, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index
);
509 mark_block_bad_bmt(OFFSET(bmt_block_index
));
513 return write_bmt_to_flash(dat
, oob
); // recursive call
517 if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index
), dat
, oob
))
519 MSG(INIT
, "Write BMT data fail, need to write again\n");
520 mark_block_bad_bmt(OFFSET(bmt_block_index
));
524 return write_bmt_to_flash(dat
, oob
); // recursive call
527 MSG(INIT
, "Write BMT data to block 0x%x success\n", bmt_block_index
);
531 /*******************************************************************
532 * Reconstruct bmt, called when found bmt info doesn't match bad
533 * block info in flash.
535 * Return NULL for failure
536 *******************************************************************/
537 bmt_struct
*reconstruct_bmt(bmt_struct
* bmt
)
540 int index
= system_block_count
;
541 unsigned short bad_index
;
544 // init everything in BMT struct
545 bmt
->version
= BMT_VERSION
;
547 bmt
->mapped_count
= 0;
549 memset(bmt
->table
, 0, bmt_block_count
* sizeof(bmt_entry
));
551 for (i
= 0; i
< bmt_block_count
; i
++, index
++)
553 if (nand_block_bad_bmt(OFFSET(index
)))
555 MSG(INIT
, "Skip bad block: 0x%x\n", index
);
560 MSG(INIT
, "read page: 0x%x\n", PAGE_ADDR(index
));
561 nand_read_page_bmt(PAGE_ADDR(index
), dat_buf
, oob_buf
);
562 /* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf))
564 MSG(INIT, "Error when read block %d\n", bmt_block_index);
568 if ((bad_index
= get_bad_index_from_oob(oob_buf
)) >= system_block_count
)
570 MSG(INIT
, "get bad index: 0x%x\n", bad_index
);
571 if (bad_index
!= 0xFFFF)
572 MSG(INIT
, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index
, bad_index
);
576 MSG(INIT
, "Block 0x%x is mapped to bad block: 0x%x\n", index
, bad_index
);
578 if (!nand_block_bad_bmt(OFFSET(bad_index
)))
580 MSG(INIT
, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index
);
581 continue; // no need to erase here, it will be erased later when trying to write BMT
584 if ((mapped
= is_block_mapped(bad_index
)) >= 0)
586 MSG(INIT
, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt
->table
[mapped
].bad_index
, bmt
->table
[mapped
].mapped_index
);
587 bmt
->table
[mapped
].mapped_index
= index
; // use new one instead.
590 // add mapping to BMT
591 bmt
->table
[bmt
->mapped_count
].bad_index
= bad_index
;
592 bmt
->table
[bmt
->mapped_count
].mapped_index
= index
;
596 MSG(INIT
, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index
, index
);
600 MSG(INIT
, "Scan replace pool done, mapped block: %d\n", bmt
->mapped_count
);
601 // dump_bmt_info(bmt);
603 // fill NAND BMT buffer
604 memset(oob_buf
, 0xFF, sizeof(oob_buf
));
605 fill_nand_bmt_buffer(bmt
, dat_buf
, oob_buf
);
608 if (!write_bmt_to_flash(dat_buf
, oob_buf
))
610 MSG(INIT
, "TRAGEDY: cannot find a place to write BMT!!!!\n");
616 /*******************************************************************
620 * Init bmt from nand. Reconstruct if not found or data error
623 * size: size of bmt and replace pool
626 * NULL for failure, and a bmt struct for success
627 *******************************************************************/
628 bmt_struct
*init_bmt(struct nand_chip
* chip
, int size
)
630 struct mtk_nand_host
*host
;
632 if (size
> 0 && size
< MAX_BMT_SIZE
)
634 MSG(INIT
, "Init bmt table, size: %d\n", size
);
635 bmt_block_count
= size
;
638 MSG(INIT
, "Invalid bmt table size: %d\n", size
);
641 nand_chip_bmt
= chip
;
642 system_block_count
= (u32
)(chip
->chipsize
>> chip
->phys_erase_shift
);
643 total_block_count
= bmt_block_count
+ system_block_count
;
644 page_per_block
= BLOCK_SIZE_BMT
/ PAGE_SIZE_BMT
;
645 host
= (struct mtk_nand_host
*)chip
->priv
;
646 mtd_bmt
= &host
->mtd
;
648 MSG(INIT
, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt
, nand_chip_bmt
);
649 MSG(INIT
, "bmt count: %d, system count: %d\n", bmt_block_count
, system_block_count
);
651 // set this flag, and unmapped block in pool will be erased.
653 memset(bmt
.table
, 0, size
* sizeof(bmt_entry
));
654 if ((bmt_block_index
= load_bmt_data(system_block_count
, size
)))
656 MSG(INIT
, "Load bmt data success @ block 0x%x\n", bmt_block_index
);
661 MSG(INIT
, "Load bmt data fail, need re-construct!\n");
662 if (reconstruct_bmt(&bmt
))
669 /*******************************************************************
676 * offset: update block/page offset.
677 * reason: update reason, see update_reason_t for reason.
678 * dat/oob: data and oob buffer for write fail.
681 * Return true for success, and false for failure.
682 *******************************************************************/
683 bool update_bmt(u64 offset
, update_reason_t reason
, u8
* dat
, u8
* oob
)
686 int orig_bad_block
= -1;
687 // int bmt_update_index;
689 u32 bad_index
= (u32
)(offset
>> nand_chip_bmt
->phys_erase_shift
);
693 if (reason
== UPDATE_WRITE_FAIL
)
695 MSG(INIT
, "Write fail, need to migrate\n");
696 if (!(map_index
= migrate_from_bad(offset
, dat
, oob
)))
698 MSG(INIT
, "migrate fail\n");
703 if (!(map_index
= find_available_block(false)))
705 MSG(INIT
, "Cannot find block in pool\n");
710 // now let's update BMT
711 if (bad_index
>= system_block_count
) // mapped block become bad, find original bad block
713 for (i
= 0; i
< bmt_block_count
; i
++)
715 if (bmt
.table
[i
].mapped_index
== bad_index
)
717 orig_bad_block
= bmt
.table
[i
].bad_index
;
722 MSG(INIT
, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block
);
724 bmt
.table
[i
].mapped_index
= map_index
;
727 bmt
.table
[bmt
.mapped_count
].mapped_index
= map_index
;
728 bmt
.table
[bmt
.mapped_count
].bad_index
= bad_index
;
732 memset(oob_buf
, 0xFF, sizeof(oob_buf
));
733 fill_nand_bmt_buffer(&bmt
, dat_buf
, oob_buf
);
734 if (!write_bmt_to_flash(dat_buf
, oob_buf
))
737 mark_block_bad_bmt(offset
);
742 /*******************************************************************
746 * Given an block index, return mapped index if it's mapped, else
747 * return given index.
750 * index: given an block index. This value cannot exceed
751 * system_block_count.
753 * Return NULL for failure
754 *******************************************************************/
755 u16
get_mapping_block_index(int index
)
760 if (index
> system_block_count
)
765 for (i
= 0; i
< bmt
.mapped_count
; i
++)
767 if (bmt
.table
[i
].bad_index
== index
)
769 return bmt
.table
[i
].mapped_index
;
776 EXPORT_SYMBOL_GPL(init_bmt
);
777 EXPORT_SYMBOL_GPL(update_bmt
);
778 EXPORT_SYMBOL_GPL(get_mapping_block_index
);
780 MODULE_LICENSE("GPL");
781 MODULE_AUTHOR("MediaTek");
782 MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver");