#include <mach/bmt.h>
#include <linux/module.h>
#include <linux/slab.h>

typedef struct
{
    char signature[3];
    u8 version;
    u8 bad_count;               // bad block count in pool
    u8 mapped_count;            // mapped block count in pool
    u8 checksum;
    u8 reserved[13];
} phys_bmt_header;

typedef struct
{
    phys_bmt_header header;
    bmt_entry table[MAX_BMT_SIZE];
} phys_bmt_struct;

typedef struct
{
    char signature[3];
} bmt_oob_data;

static char MAIN_SIGNATURE[] = "BMT";
static char OOB_SIGNATURE[] = "bmt";
#define SIGNATURE_SIZE (3)

#define MAX_DAT_SIZE 0x4000
#define MAX_OOB_SIZE 0x800

static struct mtd_info *mtd_bmt;
static struct nand_chip *nand_chip_bmt;
#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
#define PAGE_PER_SIZE_BMT (1 << (nand_chip_bmt->phys_erase_shift - nand_chip_bmt->page_shift))

#define OFFSET(block) (((u64)block) * BLOCK_SIZE_BMT)
#define PAGE_ADDR(block) ((block) * PAGE_PER_SIZE_BMT)

/*********************************************************************
 * Flash is split into 2 parts: the system part is for normal       *
 * system usage (its size is system_block_count); the other part is *
 * the replace pool (its size is bmt_block_count).                   *
 * +-------------------------------------------------+               *
 * |       system_block_count      | bmt_block_count |               *
 * +-------------------------------------------------+               *
 *********************************************************************/
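/*
 * Worked example, following the arithmetic below with illustrative
 * numbers (not values from this driver): with a 128KiB erase block
 * (phys_erase_shift = 17) and a 2048-block chip, init_bmt(chip, 80)
 * yields system_block_count = 2048, bmt_block_count = 80 and
 * total_block_count = 2128; the replace pool spans blocks 2048..2127,
 * and OFFSET(2048) = 2048 * 0x20000 = 0x10000000.
 */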
static u32 total_block_count;   // number of blocks in flash
static u32 system_block_count;
static int bmt_block_count;     // bmt table size
// static int bmt_count;        // blocks used in bmt
static int page_per_block;      // pages per block

static u32 bmt_block_index;     // bmt block index
static bmt_struct bmt;          // dynamically created global bmt table

static u8 dat_buf[MAX_DAT_SIZE];
static u8 oob_buf[MAX_OOB_SIZE];
static bool pool_erased;

/***************************************************************
 *
 * Interface adaptor for preloader/uboot/kernel.
 * These interfaces operate on physical addresses and read/write
 * physical data.
 *
 ***************************************************************/
int nand_read_page_bmt(u32 page, u8 * dat, u8 * oob)
{
    return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
}

bool nand_block_bad_bmt(u64 offset)
{
    return mtk_nand_block_bad_hw(mtd_bmt, offset);
}

bool nand_erase_bmt(u64 offset)
{
    int status;
    if (offset < 0x20000)
    {
        MSG(INIT, "erase offset: 0x%llx\n", offset);
    }

    status = mtk_nand_erase_hw(mtd_bmt, (u32)(offset >> nand_chip_bmt->page_shift)); // the nand_chip structure doesn't define an erase function
    if (status & NAND_STATUS_FAIL)
        return false;
    else
        return true;
}

int mark_block_bad_bmt(u64 offset)
{
    return mtk_nand_block_markbad_hw(mtd_bmt, offset);
}

bool nand_write_page_bmt(u32 page, u8 * dat, u8 * oob)
{
    if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob))
        return false;
    else
        return true;
}

/***************************************************************
 *                                                             *
 *                 static internal functions                   *
 *                                                             *
 ***************************************************************/
static void dump_bmt_info(bmt_struct * bmt)
{
    int i;

    MSG(INIT, "BMT v%d. total %d mappings:\n", bmt->version, bmt->mapped_count);
    for (i = 0; i < bmt->mapped_count; i++)
    {
        MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index);
    }
}

static bool match_bmt_signature(u8 * dat, u8 * oob)
{
    if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE))
    {
        return false;
    }

    if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE))
    {
        MSG(INIT, "main signature matches, oob signature doesn't, but ignore\n");
    }
    return true;
}

static u8 cal_bmt_checksum(phys_bmt_struct * phys_table, int bmt_size)
{
    int i;
    u8 checksum = 0;
    u8 *dat = (u8 *) phys_table;

    checksum += phys_table->header.version;
    checksum += phys_table->header.mapped_count;

    dat += sizeof(phys_bmt_header);
    for (i = 0; i < bmt_size * sizeof(bmt_entry); i++)
    {
        checksum += dat[i];
    }

    return checksum;
}
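/*
 * A minimal sketch of how this checksum is consumed (it mirrors the
 * check in valid_bmt_data() below; nothing here is new API):
 *
 *     u8 sum = cal_bmt_checksum(phys_table, bmt_block_count);
 *     if (phys_table->header.checksum != sum)
 *         ;   // reject this BMT copy and keep scanning the pool
 *
 * Note the sum covers version, mapped_count and the mapping table,
 * but not bad_count or the signature.
 */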

static int is_block_mapped(int index)
{
    int i;
    for (i = 0; i < bmt.mapped_count; i++)
    {
        if (index == bmt.table[i].mapped_index)
            return i;
    }
    return -1;
}

static bool is_page_used(u8 * dat, u8 * oob)
{
    if (2048 == PAGE_SIZE_BMT)
    {
        return ((oob[13] != 0xFF) || (oob[14] != 0xFF));
    } else
    {
        return ((oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF));
    }
}

static bool valid_bmt_data(phys_bmt_struct * phys_table)
{
    int i;
    u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count);

    // checksum correct?
    if (phys_table->header.checksum != checksum)
    {
        MSG(INIT, "BMT data checksum error: %x %x\n", phys_table->header.checksum, checksum);
        return false;
    }

    MSG(INIT, "BMT checksum is: 0x%x\n", phys_table->header.checksum);

    // block indexes correct?
    for (i = 0; i < phys_table->header.mapped_count; i++)
    {
        if (phys_table->table[i].bad_index >= total_block_count || phys_table->table[i].mapped_index >= total_block_count || phys_table->table[i].mapped_index < system_block_count)
        {
            MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n", phys_table->table[i].bad_index, phys_table->table[i].mapped_index);
            return false;
        }
    }

    // passed all checks: valid bmt.
    MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version);
    return true;
}

static void fill_nand_bmt_buffer(bmt_struct * bmt, u8 * dat, u8 * oob)
{
    phys_bmt_struct *phys_bmt = NULL;

    phys_bmt = (phys_bmt_struct *)kmalloc(sizeof(phys_bmt_struct), GFP_KERNEL);

    if (!phys_bmt)
    {
        printk("[fill_nand_bmt_buffer]kmalloc phys_bmt_struct failed!\n");
        while (1);
    }

    dump_bmt_info(bmt);

    // fill the phys_bmt_struct structure from the bmt_struct
    memset(phys_bmt, 0xFF, sizeof(phys_bmt_struct));

    memcpy(phys_bmt->header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE);
    phys_bmt->header.version = BMT_VERSION;
    // phys_bmt->header.bad_count = bmt->bad_count;
    phys_bmt->header.mapped_count = bmt->mapped_count;
    memcpy(phys_bmt->table, bmt->table, sizeof(bmt_entry) * bmt_block_count);

    phys_bmt->header.checksum = cal_bmt_checksum(phys_bmt, bmt_block_count);

    memcpy(dat + MAIN_SIGNATURE_OFFSET, phys_bmt, sizeof(phys_bmt_struct));
    memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE);
    kfree(phys_bmt);
}
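/*
 * On-flash layout produced by fill_nand_bmt_buffer() and read back in
 * load_bmt_data()/match_bmt_signature(): the first page of the BMT block
 * carries the whole phys_bmt_struct ("BMT" signature, header, table) at
 * MAIN_SIGNATURE_OFFSET in the data area, plus the "bmt" signature at
 * OOB_SIGNATURE_OFFSET in the OOB area. The actual offset values come
 * from <mach/bmt.h> and are not repeated here.
 */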

// Return the block index where a valid BMT was found, else 0
static int load_bmt_data(int start, int pool_size)
{
    int bmt_index = start + pool_size - 1;  // search from the end
    phys_bmt_struct *phys_table = NULL;
    int i;

    phys_table = (phys_bmt_struct *)kmalloc(sizeof(phys_bmt_struct), GFP_KERNEL);

    if (!phys_table)
    {
        printk("[load_bmt_data]kmalloc phys_bmt_struct failed!\n");
        while (1);
    }
    MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index);

    for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--)
    {
        if (nand_block_bad_bmt(OFFSET(bmt_index)))
        {
            MSG(INIT, "Skip bad block: %d\n", bmt_index);
            continue;
        }

        if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf))
        {
            MSG(INIT, "Error when reading block %d\n", bmt_index);
            continue;
        }

        if (!match_bmt_signature(dat_buf, oob_buf))
        {
            continue;
        }

        MSG(INIT, "Matched bmt signature @ block: 0x%x\n", bmt_index);

        memcpy(phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_bmt_struct));

        if (!valid_bmt_data(phys_table))
        {
            MSG(INIT, "BMT data is not correct %d\n", bmt_index);
            continue;
        } else
        {
            bmt.mapped_count = phys_table->header.mapped_count;
            bmt.version = phys_table->header.version;
            // bmt.bad_count = phys_table->header.bad_count;
            memcpy(bmt.table, phys_table->table, bmt.mapped_count * sizeof(bmt_entry));

            MSG(INIT, "bmt found at block: %d, mapped blocks: %d\n", bmt_index, bmt.mapped_count);

            for (i = 0; i < bmt.mapped_count; i++)
            {
                if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index)))
                {
                    MSG(INIT, "block 0x%x is not marked bad, power was probably lost last time\n", bmt.table[i].bad_index);
                    mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index));
                }
            }
            kfree(phys_table);
            return bmt_index;
        }
    }

    MSG(INIT, "bmt block not found!\n");
    kfree(phys_table);
    return 0;
}

/*************************************************************************
 * Find an available block and erase it.                                 *
 * start_from_end: if true, find an available block from the end of      *
 *                 flash; else, find one from the beginning of the pool. *
 * The first call also erases every unmapped block in the pool           *
 * (guarded by pool_erased).                                             *
 *************************************************************************/
static int find_available_block(bool start_from_end)
{
    int i;
    int block = system_block_count;
    int direction;

    MSG(INIT, "Try to find_available_block, pool_erased: %d\n", pool_erased);

    // erase all unmapped blocks in the pool when first looking for an available block
    if (!pool_erased)
    {
        MSG(INIT, "Erase all unmapped blocks in pool\n");
        for (i = 0; i < bmt_block_count; i++)
        {
            if (block + i == bmt_block_index)
            {
                MSG(INIT, "Skip bmt block 0x%x\n", block + i);
                continue;
            }

            if (nand_block_bad_bmt(OFFSET(block + i)))
            {
                MSG(INIT, "Skip bad block 0x%x\n", block + i);
                continue;
            }

            if (is_block_mapped(block + i) >= 0)
            {
                MSG(INIT, "Skip mapped block 0x%x\n", block + i);
                continue;
            }

            if (!nand_erase_bmt(OFFSET(block + i)))
            {
                MSG(INIT, "Erase block 0x%x failed\n", block + i);
                mark_block_bad_bmt(OFFSET(block + i));
            }
        }

        pool_erased = 1;
    }

    if (start_from_end)
    {
        block = total_block_count - 1;
        direction = -1;
    } else
    {
        block = system_block_count;
        direction = 1;
    }

    for (i = 0; i < bmt_block_count; i++, block += direction)
    {
        if (block == bmt_block_index)
        {
            MSG(INIT, "Skip bmt block 0x%x\n", block);
            continue;
        }

        if (nand_block_bad_bmt(OFFSET(block)))
        {
            MSG(INIT, "Skip bad block 0x%x\n", block);
            continue;
        }

        if (is_block_mapped(block) >= 0)
        {
            MSG(INIT, "Skip mapped block 0x%x\n", block);
            continue;
        }

        MSG(INIT, "Found block 0x%x available\n", block);
        return block;
    }

    return 0;
}

static unsigned short get_bad_index_from_oob(u8 * oob_buf)
{
    unsigned short index;
    if (2048 == PAGE_SIZE_BMT) // sector 1024 FDM size = 16, mark location moved
    {
        memcpy(&index, oob_buf + 13, OOB_INDEX_SIZE);
    } else
    {
        memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE);
    }
    return index;
}

void set_bad_index_to_oob(u8 * oob, u16 index)
{
    if (2048 == PAGE_SIZE_BMT)
    {
        memcpy(oob + 13, &index, sizeof(index));
    } else
    {
        memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index));
    }
}

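/*
 * Salvage a block whose write just failed: pick a free pool block, write
 * the failing page there first (from write_dat/write_oob, or re-read from
 * flash when write_dat is NULL), then copy every other used page across.
 * For system blocks the original index is stamped into the OOB via
 * set_bad_index_to_oob() so the mapping can later be rebuilt by
 * reconstruct_bmt(). If the destination block also fails, it is marked
 * bad and the whole migration recurses onto a fresh block.
 * Returns the destination block index, or 0 if no block is available.
 */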
static int migrate_from_bad(u64 offset, u8 * write_dat, u8 * write_oob)
{
    int page;
    u32 error_block = (u32)(offset >> nand_chip_bmt->phys_erase_shift);
    u32 error_page = (u32)(offset >> nand_chip_bmt->page_shift) % page_per_block;
    int to_index;

    memcpy(oob_buf, write_oob, MAX_OOB_SIZE);

    to_index = find_available_block(false);

    if (!to_index)
    {
        MSG(INIT, "Cannot find an available block for BMT\n");
        return 0;
    }

    { // migrate the error page first
        MSG(INIT, "Write error page: 0x%x\n", error_page);
        if (!write_dat)
        {
            nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
            write_dat = dat_buf;
        }

        if (error_block < system_block_count)
            set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, the original mapping index is in OOB

        if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
        {
            MSG(INIT, "Write to page 0x%x failed\n", PAGE_ADDR(to_index) + error_page);
            mark_block_bad_bmt(OFFSET(to_index));
            return migrate_from_bad(offset, write_dat, write_oob);
        }
    }

    for (page = 0; page < page_per_block; page++)
    {
        if (page != error_page)
        {
            nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
            if (is_page_used(dat_buf, oob_buf))
            {
                if (error_block < system_block_count)
                {
                    set_bad_index_to_oob(oob_buf, error_block);
                }
                MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
                if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
                {
                    MSG(INIT, "Write to page 0x%x failed\n", PAGE_ADDR(to_index) + page);
                    mark_block_bad_bmt(OFFSET(to_index));
                    return migrate_from_bad(offset, write_dat, write_oob);
                }
            }
        }
    }

    MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);

    return to_index;
}

static bool write_bmt_to_flash(u8 * dat, u8 * oob)
{
    bool need_erase = true;
    MSG(INIT, "Try to write BMT\n");

    if (bmt_block_index == 0)
    {
        // if we don't have an index yet, the block found below needs no
        // erase: find_available_block() has already erased it
        need_erase = false;
        if (!(bmt_block_index = find_available_block(true)))
        {
            MSG(INIT, "Cannot find an available block for BMT\n");
            return false;
        }
    }

    MSG(INIT, "Found BMT block: 0x%x\n", bmt_block_index);

    // write bmt to flash
    if (need_erase)
    {
        if (!nand_erase_bmt(OFFSET(bmt_block_index)))
        {
            MSG(INIT, "BMT block erase failed, mark bad: 0x%x\n", bmt_block_index);
            mark_block_bad_bmt(OFFSET(bmt_block_index));
            // bmt.bad_count++;

            bmt_block_index = 0;
            return write_bmt_to_flash(dat, oob);    // recursive call
        }
    }

    if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob))
    {
        MSG(INIT, "Write BMT data failed, need to write again\n");
        mark_block_bad_bmt(OFFSET(bmt_block_index));
        // bmt.bad_count++;

        bmt_block_index = 0;
        return write_bmt_to_flash(dat, oob);    // recursive call
    }

    MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
    return true;
}

/*******************************************************************
 * Reconstruct the bmt. Called when no BMT is found, or when the
 * found BMT info doesn't match the bad block info in flash.
 *
 * Returns the reconstructed bmt.
 *******************************************************************/
bmt_struct *reconstruct_bmt(bmt_struct * bmt)
{
    int i;
    int index = system_block_count;
    unsigned short bad_index;
    int mapped;

    // init everything in the BMT struct
    bmt->version = BMT_VERSION;
    bmt->bad_count = 0;
    bmt->mapped_count = 0;

    memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));

    for (i = 0; i < bmt_block_count; i++, index++)
    {
        if (nand_block_bad_bmt(OFFSET(index)))
        {
            MSG(INIT, "Skip bad block: 0x%x\n", index);
            // bmt->bad_count++;
            continue;
        }

        MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index));
        nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);

        if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count)
        {
            MSG(INIT, "get bad index: 0x%x\n", bad_index);
            if (bad_index != 0xFFFF)
                MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index);
            continue;
        }

        MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);

        if (!nand_block_bad_bmt(OFFSET(bad_index)))
        {
            MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
            continue;   // no need to erase here; it will be erased later when trying to write the BMT
        }

        if ((mapped = is_block_mapped(bad_index)) >= 0)
        {
            MSG(INIT, "bad block 0x%x is already mapped to 0x%x, probably caused by power loss, replace with the new one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
            bmt->table[mapped].mapped_index = index;    // use the new one instead
        } else
        {
            // add the mapping to the BMT
            bmt->table[bmt->mapped_count].bad_index = bad_index;
            bmt->table[bmt->mapped_count].mapped_index = index;
            bmt->mapped_count++;
        }

        MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
    }

    MSG(INIT, "Scan replace pool done, mapped blocks: %d\n", bmt->mapped_count);
    // dump_bmt_info(bmt);

    // fill the NAND BMT buffer
    memset(oob_buf, 0xFF, sizeof(oob_buf));
    fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);

    // write the BMT back
    if (!write_bmt_to_flash(dat_buf, oob_buf))
    {
        MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n");
    }

    return bmt;
}

/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *   Init the bmt from nand. Reconstruct it if not found or on data
 *   error.
 *
 * Parameter:
 *   size: size of the bmt and replace pool
 *
 * Return:
 *   NULL for failure, a bmt struct for success
 *******************************************************************/
bmt_struct *init_bmt(struct nand_chip * chip, int size)
{
    struct mtk_nand_host *host;

    if (size > 0 && size < MAX_BMT_SIZE)
    {
        MSG(INIT, "Init bmt table, size: %d\n", size);
        bmt_block_count = size;
    } else
    {
        MSG(INIT, "Invalid bmt table size: %d\n", size);
        return NULL;
    }
    nand_chip_bmt = chip;
    system_block_count = (u32)(chip->chipsize >> chip->phys_erase_shift);
    total_block_count = bmt_block_count + system_block_count;
    page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT;
    host = (struct mtk_nand_host *)chip->priv;
    mtd_bmt = &host->mtd;

    MSG(INIT, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt);
    MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count);

    // clear this flag so unmapped blocks in the pool get erased on the first search
    pool_erased = 0;
    memset(bmt.table, 0, size * sizeof(bmt_entry));
    if ((bmt_block_index = load_bmt_data(system_block_count, size)))
    {
        MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index);
        dump_bmt_info(&bmt);
        return &bmt;
    } else
    {
        MSG(INIT, "Load bmt data fail, need to re-construct!\n");
        if (reconstruct_bmt(&bmt))
            return &bmt;
        else
            return NULL;
    }
}
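
/*
 * A minimal usage sketch, not part of this driver: a hypothetical probe
 * path wiring the BMT in. example_bmt_setup and POOL_BLOCKS are assumed
 * names/values; the real pool size is platform-specific.
 */
#if 0
#define POOL_BLOCKS 80
static int example_bmt_setup(struct nand_chip *chip)
{
    bmt_struct *tbl = init_bmt(chip, POOL_BLOCKS);
    if (!tbl)
        return -EIO;    /* no usable BMT found and reconstruction failed */
    return 0;
}
#endif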

/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *   Update the BMT.
 *
 * Parameter:
 *   offset: block/page offset to update.
 *   reason: update reason; see update_reason_t.
 *   dat/oob: data and oob buffers of the failed write.
 *
 * Return:
 *   true for success, false for failure.
 *******************************************************************/
bool update_bmt(u64 offset, update_reason_t reason, u8 * dat, u8 * oob)
{
    int map_index;
    int orig_bad_block = -1;
    int i;
    u32 bad_index = (u32)(offset >> nand_chip_bmt->phys_erase_shift);

    if (reason == UPDATE_WRITE_FAIL)
    {
        MSG(INIT, "Write fail, need to migrate\n");
        if (!(map_index = migrate_from_bad(offset, dat, oob)))
        {
            MSG(INIT, "migrate fail\n");
            return false;
        }
    } else
    {
        if (!(map_index = find_available_block(false)))
        {
            MSG(INIT, "Cannot find block in pool\n");
            return false;
        }
    }

    // now let's update the BMT
    if (bad_index >= system_block_count)    // a mapped block became bad: find the original bad block
    {
        for (i = 0; i < bmt_block_count; i++)
        {
            if (bmt.table[i].mapped_index == bad_index)
            {
                orig_bad_block = bmt.table[i].bad_index;
                break;
            }
        }
        if (i == bmt_block_count)   // no entry found: don't write past the table
        {
            MSG(INIT, "No mapping entry found for bad block 0x%x\n", bad_index);
            return false;
        }
        // bmt.bad_count++;
        MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);

        bmt.table[i].mapped_index = map_index;
    } else
    {
        bmt.table[bmt.mapped_count].mapped_index = map_index;
        bmt.table[bmt.mapped_count].bad_index = bad_index;
        bmt.mapped_count++;
    }

    memset(oob_buf, 0xFF, sizeof(oob_buf));
    fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
    if (!write_bmt_to_flash(dat_buf, oob_buf))
        return false;

    mark_block_bad_bmt(offset);

    return true;
}
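
/*
 * A minimal sketch of the expected call site (illustrative only; the
 * failing write and the retry loop live in the low-level driver, not
 * here): when a page program fails, hand the block over to the BMT and
 * retry on the remapped block.
 */
#if 0
/* phys_block, page_in_block, data and oob are hypothetical caller state */
if (!update_bmt(OFFSET(phys_block) + ((u64)page_in_block << nand_chip_bmt->page_shift),
                UPDATE_WRITE_FAIL, data, oob))
    return -EIO;
phys_block = get_mapping_block_index(phys_block);   /* retry target */
#endif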

/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *   Given a block index, return the mapped index if it's mapped,
 *   else return the given index.
 *
 * Parameter:
 *   index: a block index. This value cannot exceed
 *   system_block_count.
 *
 * Return:
 *   The mapped block index, or the given index if unmapped.
 *******************************************************************/
u16 get_mapping_block_index(int index)
{
    int i;

    if (index > system_block_count)
    {
        return index;
    }

    for (i = 0; i < bmt.mapped_count; i++)
    {
        if (bmt.table[i].bad_index == index)
        {
            return bmt.table[i].mapped_index;
        }
    }

    return index;
}
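
/*
 * A minimal remap sketch (illustrative; logical_block and page_in_block
 * are hypothetical caller values): every read/write/erase should go
 * through the mapping first.
 */
#if 0
u16 phys_block = get_mapping_block_index(logical_block);
nand_read_page_bmt(PAGE_ADDR(phys_block) + page_in_block, dat_buf, oob_buf);
#endif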

EXPORT_SYMBOL_GPL(init_bmt);
EXPORT_SYMBOL_GPL(update_bmt);
EXPORT_SYMBOL_GPL(get_mapping_block_index);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("MediaTek");
MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver");