[MTD] Introduce writesize
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / jffs2 / wbuf.c
1/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2001-2003 Red Hat, Inc.
5 * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
6 *
7 * Created by David Woodhouse <dwmw2@infradead.org>
8 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9 *
10 * For licensing information, see the file 'LICENCE' in this directory.
11 *
12 * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/mtd/mtd.h>
19#include <linux/crc32.h>
20#include <linux/mtd/nand.h>
21#include <linux/jiffies.h>
22
23#include "nodelist.h"
24
25/* For testing write failures */
26#undef BREAKME
27#undef BREAKMEHEADER
28
29#ifdef BREAKME
30static unsigned char *brokenbuf;
31#endif
32
33#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
34#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
35
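As an illustration of the two macros above (not part of wbuf.c): they split a flash offset into the start of its write-buffer page and the offset inside that page. The sketch below is self-contained and assumes an example page size of 512 bytes.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the c->wbuf_pagesize based macros above. */
#define EX_PAGESIZE	512UL
#define EX_PAGE_DIV(x)	(((unsigned long)(x) / EX_PAGESIZE) * EX_PAGESIZE)
#define EX_PAGE_MOD(x)	((unsigned long)(x) % EX_PAGESIZE)

int main(void)
{
	unsigned long to = 0x12345;	/* arbitrary flash offset */

	/* 0x12345 -> page starts at 0x12200, offset 0x145 within the page */
	printf("page start 0x%lx, offset in page 0x%lx\n",
	       EX_PAGE_DIV(to), EX_PAGE_MOD(to));
	return 0;
}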
36/* max. erase failures before we mark a block bad */
37#define MAX_ERASE_FAILURES 2
38
39struct jffs2_inodirty {
40 uint32_t ino;
41 struct jffs2_inodirty *next;
42};
43
44static struct jffs2_inodirty inodirty_nomem;
45
46static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
47{
48 struct jffs2_inodirty *this = c->wbuf_inodes;
49
50 /* If a malloc failed, consider _everything_ dirty */
51 if (this == &inodirty_nomem)
52 return 1;
53
54 /* If ino == 0, _any_ non-GC writes mean 'yes' */
55 if (this && !ino)
56 return 1;
57
58 /* Look to see if the inode in question is pending in the wbuf */
59 while (this) {
60 if (this->ino == ino)
61 return 1;
62 this = this->next;
63 }
64 return 0;
65}
66
67static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
68{
69 struct jffs2_inodirty *this;
70
71 this = c->wbuf_inodes;
72
73 if (this != &inodirty_nomem) {
74 while (this) {
75 struct jffs2_inodirty *next = this->next;
76 kfree(this);
77 this = next;
78 }
79 }
80 c->wbuf_inodes = NULL;
81}
82
83static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
84{
85 struct jffs2_inodirty *new;
86
87 /* Mark the superblock dirty so that kupdated will flush... */
88 jffs2_erase_pending_trigger(c);
89
90 if (jffs2_wbuf_pending_for_ino(c, ino))
91 return;
92
93 new = kmalloc(sizeof(*new), GFP_KERNEL);
94 if (!new) {
95 D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
96 jffs2_clear_wbuf_ino_list(c);
97 c->wbuf_inodes = &inodirty_nomem;
98 return;
99 }
100 new->ino = ino;
101 new->next = c->wbuf_inodes;
102 c->wbuf_inodes = new;
103 return;
104}
105
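The helpers above rely on a static sentinel (&inodirty_nomem): if a list-node allocation fails, the list head is pointed at the sentinel and every later lookup answers "dirty" (the real code also frees the old list first). A minimal, hedged sketch of that pattern outside the JFFS2 structures, with invented names:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct dirty_ino { uint32_t ino; struct dirty_ino *next; };

static struct dirty_ino dirty_nomem;		/* sentinel: "assume everything dirty" */
static struct dirty_ino *dirty_list;

static void mark_dirty(uint32_t ino)
{
	struct dirty_ino *new = malloc(sizeof(*new));

	if (!new) {				/* degrade safely on OOM */
		dirty_list = &dirty_nomem;
		return;
	}
	new->ino = ino;
	new->next = (dirty_list == &dirty_nomem) ? NULL : dirty_list;
	dirty_list = new;
}

static int is_dirty(uint32_t ino)
{
	struct dirty_ino *p = dirty_list;

	if (p == &dirty_nomem)			/* lost track: everything is dirty */
		return 1;
	for (; p; p = p->next)
		if (p->ino == ino)
			return 1;
	return 0;
}

int main(void)
{
	mark_dirty(42);
	printf("ino 42 dirty: %d, ino 7 dirty: %d\n", is_dirty(42), is_dirty(7));
	return 0;
}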
106static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
107{
108 struct list_head *this, *next;
109 static int n;
110
111 if (list_empty(&c->erasable_pending_wbuf_list))
112 return;
113
114 list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
115 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
116
117 D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
118 list_del(this);
119 if ((jiffies + (n++)) & 127) {
120 /* Most of the time, we just erase it immediately. Otherwise we
121 spend ages scanning it on mount, etc. */
122 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
123 list_add_tail(&jeb->list, &c->erase_pending_list);
124 c->nr_erasing_blocks++;
125 jffs2_erase_pending_trigger(c);
126 } else {
127 /* Sometimes, however, we leave it elsewhere so it doesn't get
128 immediately reused, and we spread the load a bit. */
129 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
130 list_add_tail(&jeb->list, &c->erasable_list);
131 }
132 }
133}
134
135#define REFILE_NOTEMPTY 0
136#define REFILE_ANYWAY 1
137
138static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
139{
140 D1(printk("About to refile bad block at %08x\n", jeb->offset));
141
142 /* File the existing block on the bad_used_list.... */
143 if (c->nextblock == jeb)
144 c->nextblock = NULL;
145 else /* Not sure this should ever happen... need more coffee */
146 list_del(&jeb->list);
147 if (jeb->first_node) {
148 D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
149 list_add(&jeb->list, &c->bad_used_list);
150 } else {
151 BUG_ON(allow_empty == REFILE_NOTEMPTY);
152 /* It has to have had some nodes or we couldn't be here */
153 D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
154 list_add(&jeb->list, &c->erase_pending_list);
155 c->nr_erasing_blocks++;
156 jffs2_erase_pending_trigger(c);
157 }
158
159 /* Adjust its size counts accordingly */
160 c->wasted_size += jeb->free_size;
161 c->free_size -= jeb->free_size;
162 jeb->wasted_size += jeb->free_size;
163 jeb->free_size = 0;
164
165 jffs2_dbg_dump_block_lists_nolock(c);
166 jffs2_dbg_acct_sanity_check_nolock(c,jeb);
167 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
168}
169
170/* Recover from failure to write wbuf. Recover the nodes up to the
171 * wbuf, not the one which we were starting to try to write. */
172
173static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
174{
175 struct jffs2_eraseblock *jeb, *new_jeb;
176 struct jffs2_raw_node_ref **first_raw, **raw;
177 size_t retlen;
178 int ret;
179 unsigned char *buf;
180 uint32_t start, end, ofs, len;
181
182 spin_lock(&c->erase_completion_lock);
183
184 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
185
186 jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
187
188 /* Find the first node to be recovered, by skipping over every
189 node which ends before the wbuf starts, or which is obsolete. */
190 first_raw = &jeb->first_node;
191 while (*first_raw &&
192 (ref_obsolete(*first_raw) ||
193 (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
194 D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
195 ref_offset(*first_raw), ref_flags(*first_raw),
196 (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
197 c->wbuf_ofs));
198 first_raw = &(*first_raw)->next_phys;
199 }
200
201 if (!*first_raw) {
202 /* All nodes were obsolete. Nothing to recover. */
203 D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
204 spin_unlock(&c->erase_completion_lock);
205 return;
206 }
207
208 start = ref_offset(*first_raw);
209 end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);
210
211 /* Find the last node to be recovered */
212 raw = first_raw;
213 while ((*raw)) {
214 if (!ref_obsolete(*raw))
215 end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
216
217 raw = &(*raw)->next_phys;
218 }
219 spin_unlock(&c->erase_completion_lock);
220
221 D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));
222
223 buf = NULL;
224 if (start < c->wbuf_ofs) {
225 /* First affected node was already partially written.
226 * Attempt to reread the old data into our buffer. */
227
228 buf = kmalloc(end - start, GFP_KERNEL);
229 if (!buf) {
230 printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
231
232 goto read_failed;
233 }
234
235 /* Do the read... */
236 if (jffs2_cleanmarker_oob(c))
237 ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
238 else
239 ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
240
241 if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
242 /* ECC recovered */
243 ret = 0;
244 }
245 if (ret || retlen != c->wbuf_ofs - start) {
246 printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
247
248 kfree(buf);
249 buf = NULL;
250 read_failed:
251 first_raw = &(*first_raw)->next_phys;
252 /* If this was the only node to be recovered, give up */
253 if (!(*first_raw))
254 return;
255
256 /* It wasn't. Go on and try to recover nodes complete in the wbuf */
257 start = ref_offset(*first_raw);
258 } else {
259 /* Read succeeded. Copy the remaining data from the wbuf */
260 memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
261 }
262 }
263 /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
264 Either 'buf' contains the data, or we find it in the wbuf */
265
266
267 /* ... and get an allocation of space from a shiny new block instead */
268 ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE);
269 if (ret) {
270 printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
271 kfree(buf);
272 return;
273 }
274 if (end-start >= c->wbuf_pagesize) {
275 /* Need to do another write immediately, but it's possible
276 that this is just because the wbuf itself is completely
277 full, and there's nothing earlier read back from the
278 flash. Hence 'buf' isn't necessarily what we're writing
279 from. */
280 unsigned char *rewrite_buf = buf?:c->wbuf;
281 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
282
283 D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
284 towrite, ofs));
285
286#ifdef BREAKMEHEADER
287 static int breakme;
288 if (breakme++ == 20) {
289 printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
290 breakme = 0;
291 c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
292 brokenbuf, NULL, c->oobinfo);
293 ret = -EIO;
294 } else
295#endif
296 if (jffs2_cleanmarker_oob(c))
297 ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
298 rewrite_buf, NULL, c->oobinfo);
299 else
300 ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, rewrite_buf);
301
302 if (ret || retlen != towrite) {
303 /* Argh. We tried. Really we did. */
304 printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
305 kfree(buf);
306
307 if (retlen) {
308 struct jffs2_raw_node_ref *raw2;
309
310 raw2 = jffs2_alloc_raw_node_ref();
311 if (!raw2)
312 return;
313
314 raw2->flash_offset = ofs | REF_OBSOLETE;
315
316 jffs2_add_physical_node_ref(c, raw2, ref_totlen(c, jeb, *first_raw), NULL);
317 }
318 return;
319 }
320 printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
321
322 c->wbuf_len = (end - start) - towrite;
323 c->wbuf_ofs = ofs + towrite;
324 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
325 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
326 kfree(buf);
327 } else {
328 /* OK, now we're left with the dregs in whichever buffer we're using */
329 if (buf) {
330 memcpy(c->wbuf, buf, end-start);
331 kfree(buf);
332 } else {
333 memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
334 }
335 c->wbuf_ofs = ofs;
336 c->wbuf_len = end - start;
337 }
338
339 /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
340 new_jeb = &c->blocks[ofs / c->sector_size];
341
342 spin_lock(&c->erase_completion_lock);
343 if (new_jeb->first_node) {
344 /* Odd, but possible with ST flash later maybe */
345 new_jeb->last_node->next_phys = *first_raw;
346 } else {
347 new_jeb->first_node = *first_raw;
348 }
349
350 raw = first_raw;
351 while (*raw) {
352 uint32_t rawlen = ref_totlen(c, jeb, *raw);
353
354 D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
355 rawlen, ref_offset(*raw), ref_flags(*raw), ofs));
356
357 if (ref_obsolete(*raw)) {
358 /* Shouldn't really happen much */
359 new_jeb->dirty_size += rawlen;
360 new_jeb->free_size -= rawlen;
361 c->dirty_size += rawlen;
362 } else {
363 new_jeb->used_size += rawlen;
364 new_jeb->free_size -= rawlen;
365 jeb->dirty_size += rawlen;
366 jeb->used_size -= rawlen;
367 c->dirty_size += rawlen;
368 }
369 c->free_size -= rawlen;
370 (*raw)->flash_offset = ofs | ref_flags(*raw);
371 ofs += rawlen;
372 new_jeb->last_node = *raw;
373
374 raw = &(*raw)->next_phys;
375 }
376
377 /* Fix up the original jeb now it's on the bad_list */
378 *first_raw = NULL;
379 if (first_raw == &jeb->first_node) {
380 jeb->last_node = NULL;
381 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
382 list_del(&jeb->list);
383 list_add(&jeb->list, &c->erase_pending_list);
384 c->nr_erasing_blocks++;
385 jffs2_erase_pending_trigger(c);
386 }
387 else
388 jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
389
390 jffs2_dbg_acct_sanity_check_nolock(c, jeb);
391 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
392
393 jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
394 jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
395
396 spin_unlock(&c->erase_completion_lock);
397
398 D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
399}
400
401/* Meaning of pad argument:
402 0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
403 1: Pad, do not adjust nextblock free_size
404 2: Pad, adjust nextblock free_size
405*/
406#define NOPAD 0
407#define PAD_NOACCOUNT 1
408#define PAD_ACCOUNTING 2
409
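What the pad argument changes, in concrete terms: with NOPAD the buffer is already page-aligned and is written as-is; with either PAD mode the current contents are first rounded up with PAD() (4-byte alignment in JFFS2) and the rest of the page is filled with a padding node and later accounted as wasted space. A self-contained sketch of that arithmetic, with invented example numbers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for JFFS2's PAD(): round up to a 4-byte boundary. */
#define PAD4(x) (((x) + 3) & ~3)

int main(void)
{
	uint32_t wbuf_pagesize = 512;	/* assumed NAND page size */
	uint32_t wbuf_len = 313;	/* bytes currently queued in the wbuf */

	uint32_t padded = PAD4(wbuf_len);		/* 316 */
	uint32_t waste = wbuf_pagesize - padded;	/* 196 bytes of padding */

	/* PAD_NOACCOUNT vs PAD_ACCOUNTING differ only in whether nextblock's
	 * free_size is adjusted for those wasted bytes (see the function below). */
	printf("pad %u up to %u, waste %u of %u\n",
	       wbuf_len, padded, waste, wbuf_pagesize);
	return 0;
}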
410static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
411{
412 int ret;
413 size_t retlen;
414
415 /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
416 del_timer() the timer we never initialised. */
417 if (!jffs2_is_writebuffered(c))
418 return 0;
419
420 if (!down_trylock(&c->alloc_sem)) {
421 up(&c->alloc_sem);
422 printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
423 BUG();
424 }
425
426 if (!c->wbuf_len) /* already checked c->wbuf above */
427 return 0;
428
429 /* Claim the remaining space on the page.
430 This happens if we switch to a new block,
431 or if fsync forces us to flush the writebuffer.
432 If we are just moving on to the next page, we will not
433 have any remaining space to claim.
434 */
435 if (pad) {
436 c->wbuf_len = PAD(c->wbuf_len);
437
438 /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
439 with 8 byte page size */
440 memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
441
442 if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
443 struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
444 padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
445 padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
446 padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
447 padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
448 }
449 }
450 /* else jffs2_flash_writev has actually filled in the rest of the
451 buffer for us, and will deal with the node refs etc. later. */
452
453#ifdef BREAKME
454 static int breakme;
455 if (breakme++ == 20) {
456 printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
457 breakme = 0;
458 c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
459 &retlen, brokenbuf, NULL, c->oobinfo);
460 ret = -EIO;
461 } else
462#endif
463
464 if (jffs2_cleanmarker_oob(c))
465 ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
466 else
467 ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
468
469 if (ret || retlen != c->wbuf_pagesize) {
470 if (ret)
471 printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
472 else {
473 printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
474 retlen, c->wbuf_pagesize);
475 ret = -EIO;
476 }
477
478 jffs2_wbuf_recover(c);
479
480 return ret;
481 }
482
483 /* Adjust free size of the block if we padded. */
484 if (pad) {
485 struct jffs2_eraseblock *jeb;
486 struct jffs2_raw_node_ref *ref;
487 uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
488
489 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
490
491 D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
492 (jeb==c->nextblock)?"next":"", jeb->offset));
493
494 /* wbuf_pagesize - wbuf_len is the amount of space that's to be
495 padded. If there is less free space in the block than that,
496 something screwed up */
497 if (jeb->free_size < waste) {
498 printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
499 c->wbuf_ofs, c->wbuf_len, waste);
500 printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
501 jeb->offset, jeb->free_size);
502 BUG();
503 }
504 ref = jffs2_alloc_raw_node_ref();
505 if (!ref)
506 return -ENOMEM;
507 ref->flash_offset = c->wbuf_ofs + c->wbuf_len;
508 ref->flash_offset |= REF_OBSOLETE;
509
510 spin_lock(&c->erase_completion_lock);
511
512 jffs2_link_node_ref(c, jeb, ref, waste, NULL);
513 /* FIXME: that made it count as dirty. Convert to wasted */
514 jeb->dirty_size -= waste;
515 c->dirty_size -= waste;
516 jeb->wasted_size += waste;
517 c->wasted_size += waste;
518 } else
519 spin_lock(&c->erase_completion_lock);
520
521 /* Stick any now-obsoleted blocks on the erase_pending_list */
522 jffs2_refile_wbuf_blocks(c);
523 jffs2_clear_wbuf_ino_list(c);
524 spin_unlock(&c->erase_completion_lock);
525
526 memset(c->wbuf,0xff,c->wbuf_pagesize);
527 /* adjust write buffer offset, else we get a non contiguous write bug */
528 c->wbuf_ofs += c->wbuf_pagesize;
529 c->wbuf_len = 0;
530 return 0;
531}
532
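For reference, a hedged sketch of the padding node that __jffs2_flush_wbuf() writes into the unused tail of the page: a bare node header whose totlen covers the padding, with hdr_crc computed over the header minus the CRC field. The struct below is a simplified userspace stand-in, not the kernel's jffs2_unknown_node, and zlib's crc32() is used only so the sketch links; the kernel's crc32() seeds differently, so real on-flash values come from the kernel helper.

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <zlib.h>

struct pad_node {		/* stand-in for struct jffs2_unknown_node */
	uint16_t magic;		/* JFFS2_MAGIC_BITMASK, 0x1985 */
	uint16_t nodetype;	/* JFFS2_NODETYPE_PADDING */
	uint32_t totlen;	/* bytes of padding, up to the end of the page */
	uint32_t hdr_crc;	/* CRC over the 8 bytes above */
};

int main(void)
{
	uint8_t page[512];		/* one write-buffer page */
	uint32_t wbuf_len = 316;	/* already PAD()ed data length */
	struct pad_node pad;

	/* Zero the tail, as the function above does before placing the node. */
	memset(page + wbuf_len, 0, sizeof(page) - wbuf_len);

	pad.magic = 0x1985;
	pad.nodetype = 0x2004;		/* JFFS2_NODETYPE_PADDING */
	pad.totlen = sizeof(page) - wbuf_len;
	pad.hdr_crc = crc32(0, (const Bytef *)&pad, sizeof(pad) - 4);
	memcpy(page + wbuf_len, &pad, sizeof(pad));

	printf("padding node: %u bytes starting at offset %u\n",
	       pad.totlen, wbuf_len);
	return 0;
}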
533/* Trigger garbage collection to flush the write-buffer.
534 If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
535 outstanding. If ino arg non-zero, do it only if a write for the
536 given inode is outstanding. */
537int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
538{
539 uint32_t old_wbuf_ofs;
540 uint32_t old_wbuf_len;
541 int ret = 0;
542
543 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
544
545 if (!c->wbuf)
546 return 0;
547
548 down(&c->alloc_sem);
549 if (!jffs2_wbuf_pending_for_ino(c, ino)) {
550 D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
551 up(&c->alloc_sem);
552 return 0;
553 }
554
555 old_wbuf_ofs = c->wbuf_ofs;
556 old_wbuf_len = c->wbuf_len;
557
558 if (c->unchecked_size) {
559 /* GC won't make any progress for a while */
560 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
561 down_write(&c->wbuf_sem);
562 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
563 /* retry flushing wbuf in case jffs2_wbuf_recover
564 left some data in the wbuf */
565 if (ret)
566 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
567 up_write(&c->wbuf_sem);
568 } else while (old_wbuf_len &&
569 old_wbuf_ofs == c->wbuf_ofs) {
570
571 up(&c->alloc_sem);
572
573 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
574
575 ret = jffs2_garbage_collect_pass(c);
576 if (ret) {
577 /* GC failed. Flush it with padding instead */
578 down(&c->alloc_sem);
579 down_write(&c->wbuf_sem);
580 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
581 /* retry flushing wbuf in case jffs2_wbuf_recover
582 left some data in the wbuf */
583 if (ret)
584 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
585 up_write(&c->wbuf_sem);
586 break;
587 }
588 down(&c->alloc_sem);
589 }
590
591 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
592
593 up(&c->alloc_sem);
594 return ret;
595}
596
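A toy model (not kernel code) of the loop jffs2_flush_wbuf_gc() runs when the filesystem is fully checked: keep doing GC passes until the write-buffer has visibly moved or drained, and fall back to a padded flush if a pass fails. All state and helpers below are faked for illustration.

#include <stdint.h>
#include <stdio.h>

static uint32_t wbuf_ofs = 0x1000, wbuf_len = 200;	/* pretend wbuf state */

static int gc_pass(void)
{
	/* Pretend one GC pass pushed the buffered data out to a new offset. */
	wbuf_ofs += 0x200;
	wbuf_len = 0;
	return 0;
}

static void flush_pad(void)
{
	wbuf_len = 0;	/* pad the rest of the page and write it out */
}

int main(void)
{
	uint32_t old_ofs = wbuf_ofs, old_len = wbuf_len;

	while (old_len && old_ofs == wbuf_ofs) {
		if (gc_pass()) {	/* GC failed: flush with padding instead */
			flush_pad();
			break;
		}
	}
	printf("wbuf now at 0x%x, len %u\n", wbuf_ofs, wbuf_len);
	return 0;
}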
597/* Pad write-buffer to end and write it, wasting space. */
598int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
599{
600 int ret;
601
602 if (!c->wbuf)
603 return 0;
604
605 down_write(&c->wbuf_sem);
606 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
607 /* retry - maybe wbuf recover left some data in wbuf. */
608 if (ret)
609 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
610 up_write(&c->wbuf_sem);
611
612 return ret;
613}
614int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
615{
616 struct kvec outvecs[3];
617 uint32_t totlen = 0;
618 uint32_t split_ofs = 0;
619 uint32_t old_totlen;
620 int ret, splitvec = -1;
621 int invec, outvec;
622 size_t wbuf_retlen;
623 unsigned char *wbuf_ptr;
624 size_t donelen = 0;
625 uint32_t outvec_to = to;
626
627 /* If not NAND flash, don't bother */
628 if (!jffs2_is_writebuffered(c))
629 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
630
631 down_write(&c->wbuf_sem);
632
633 /* If wbuf_ofs is not initialized, set it to target address */
634 if (c->wbuf_ofs == 0xFFFFFFFF) {
635 c->wbuf_ofs = PAGE_DIV(to);
636 c->wbuf_len = PAGE_MOD(to);
637 memset(c->wbuf,0xff,c->wbuf_pagesize);
638 }
639
640 /* Fixup the wbuf if we are moving to a new eraseblock. The checks below
641 fail for ECC'd NOR because cleanmarker == 16, so a block starts at
642 xxx0010. */
643 if (jffs2_nor_ecc(c)) {
644 if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) {
645 c->wbuf_ofs = PAGE_DIV(to);
646 c->wbuf_len = PAGE_MOD(to);
647 memset(c->wbuf,0xff,c->wbuf_pagesize);
648 }
649 }
650
651 /* Sanity checks on target address.
652 It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
653 and it's permitted to write at the beginning of a new
654 erase block. Anything else, and you die.
655 New block starts at xxx000c (0-b = block header)
656 */
657 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
658 /* It's a write to a new block */
659 if (c->wbuf_len) {
660 D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs));
661 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
662 if (ret) {
663 /* the underlying layer has to check wbuf_len to do the cleanup */
664 D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
665 *retlen = 0;
666 goto exit;
667 }
668 }
669 /* set pointer to new block */
670 c->wbuf_ofs = PAGE_DIV(to);
671 c->wbuf_len = PAGE_MOD(to);
672 }
673
674 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
675 /* We're not writing immediately after the writebuffer. Bad. */
676 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to);
677 if (c->wbuf_len)
678 printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
679 c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
680 BUG();
681 }
682
683 /* Note outvecs[3] above. We know count is never greater than 2 */
684 if (count > 2) {
685 printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count);
686 BUG();
687 }
688
689 invec = 0;
690 outvec = 0;
691
692 /* Fill writebuffer first, if already in use */
693 if (c->wbuf_len) {
694 uint32_t invec_ofs = 0;
695
696 /* adjust alignment offset */
697 if (c->wbuf_len != PAGE_MOD(to)) {
698 c->wbuf_len = PAGE_MOD(to);
699 /* take care of alignment to next page */
700 if (!c->wbuf_len)
701 c->wbuf_len = c->wbuf_pagesize;
702 }
703
704 while(c->wbuf_len < c->wbuf_pagesize) {
705 uint32_t thislen;
706
707 if (invec == count)
708 goto alldone;
709
710 thislen = c->wbuf_pagesize - c->wbuf_len;
711
712 if (thislen >= invecs[invec].iov_len)
713 thislen = invecs[invec].iov_len;
714
715 invec_ofs = thislen;
716
717 memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
718 c->wbuf_len += thislen;
719 donelen += thislen;
720 /* Get the next invec, if the current one did not fill the buffer */
721 if (c->wbuf_len < c->wbuf_pagesize)
722 invec++;
723 }
724
725 /* write buffer is full, flush buffer */
726 ret = __jffs2_flush_wbuf(c, NOPAD);
727 if (ret) {
728 /* the underlying layer has to check wbuf_len to do the cleanup */
729 D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
730 /* Retlen zero to make sure our caller doesn't mark the space dirty.
731 We've already done everything that's necessary */
732 *retlen = 0;
733 goto exit;
734 }
735 outvec_to += donelen;
736 c->wbuf_ofs = outvec_to;
737
738 /* All invecs done ? */
739 if (invec == count)
740 goto alldone;
741
742 /* Set up the first outvec, containing the remainder of the
743 invec we partially used */
744 if (invecs[invec].iov_len > invec_ofs) {
745 outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs;
746 totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs;
747 if (totlen > c->wbuf_pagesize) {
748 splitvec = outvec;
749 split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen);
750 }
751 outvec++;
752 }
753 invec++;
754 }
755
756 /* OK, now we've flushed the wbuf and the start of the bits
757 we have been asked to write, now to write the rest.... */
758
759 /* totlen holds the amount of data still to be written */
760 old_totlen = totlen;
761 for ( ; invec < count; invec++,outvec++ ) {
762 outvecs[outvec].iov_base = invecs[invec].iov_base;
763 totlen += outvecs[outvec].iov_len = invecs[invec].iov_len;
764 if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) {
765 splitvec = outvec;
766 split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen);
767 old_totlen = totlen;
768 }
769 }
770
771 /* Now the outvecs array holds all the remaining data to write */
772 /* Up to splitvec,split_ofs is to be written immediately. The rest
773 goes into the (now-empty) wbuf */
774
775 if (splitvec != -1) {
776 uint32_t remainder;
777
778 remainder = outvecs[splitvec].iov_len - split_ofs;
779 outvecs[splitvec].iov_len = split_ofs;
780
781 /* We did cross a page boundary, so we write some now */
782 if (jffs2_cleanmarker_oob(c))
783 ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
784 else
785 ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);
786
787 if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
788 /* At this point we have no problem,
789 c->wbuf is empty. However refile nextblock to avoid
790 writing again to same address.
791 */
792 struct jffs2_eraseblock *jeb;
793
794 spin_lock(&c->erase_completion_lock);
795
796 jeb = &c->blocks[outvec_to / c->sector_size];
797 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
798
799 *retlen = 0;
800 spin_unlock(&c->erase_completion_lock);
801 goto exit;
802 }
803
804 donelen += wbuf_retlen;
805 c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);
806
807 if (remainder) {
808 outvecs[splitvec].iov_base += split_ofs;
809 outvecs[splitvec].iov_len = remainder;
810 } else {
811 splitvec++;
812 }
813
814 } else {
815 splitvec = 0;
816 }
817
818 /* Now splitvec points to the start of the bits we have to copy
819 into the wbuf */
820 wbuf_ptr = c->wbuf;
821
822 for ( ; splitvec < outvec; splitvec++) {
823 /* Don't copy the wbuf into itself */
824 if (outvecs[splitvec].iov_base == c->wbuf)
825 continue;
826 memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len);
827 wbuf_ptr += outvecs[splitvec].iov_len;
828 donelen += outvecs[splitvec].iov_len;
829 }
830 c->wbuf_len = wbuf_ptr - c->wbuf;
831
832 /* If there's a remainder in the wbuf and it's a non-GC write,
833 remember that the wbuf affects this ino */
834alldone:
835 *retlen = donelen;
836
837 if (jffs2_sum_active()) {
838 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
839 if (res)
840 return res;
841 }
842
1da177e4
LT
843 if (c->wbuf_len && ino)
844 jffs2_wbuf_dirties_inode(c, ino);
845
846 ret = 0;
847
848exit:
849 up_write(&c->wbuf_sem);
850 return ret;
851}
852
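To make the splitvec/split_ofs bookkeeping in jffs2_flash_writev() concrete, a simplified standalone model of the idea: once the old wbuf contents are flushed, whole write-buffer pages of the incoming vectors are written straight to flash, and only the tail that does not fill a page stays behind in the wbuf. The vector lengths below are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pagesize = 512;			/* assumed wbuf page size */
	uint32_t iov_len[2] = { 700, 260 };		/* two incoming vectors */
	uint32_t totlen = iov_len[0] + iov_len[1];	/* 960 bytes in total */

	/* Everything up to the last page boundary goes out immediately;
	 * the remainder is copied into the (now empty) write buffer. */
	uint32_t write_now = (totlen / pagesize) * pagesize;	/* 512 */
	uint32_t keep_in_wbuf = totlen % pagesize;		/* 448 */

	printf("write %u bytes now, keep %u bytes in the wbuf\n",
	       write_now, keep_in_wbuf);
	return 0;
}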
853/*
854 * This is the entry for flash write.
855 * Check if we work on NAND flash; if so, build a kvec and write it via writev
856*/
857int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
858{
859 struct kvec vecs[1];
860
861 if (!jffs2_is_writebuffered(c))
862 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
863
864 vecs[0].iov_base = (unsigned char *) buf;
865 vecs[0].iov_len = len;
866 return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
867}
868
869/*
870 Handle readback from writebuffer and ECC failure return
871*/
872int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
873{
874 loff_t orbf = 0, owbf = 0, lwbf = 0;
875 int ret;
876
877 if (!jffs2_is_writebuffered(c))
878 return c->mtd->read(c->mtd, ofs, len, retlen, buf);
879
880 /* Read flash */
881 down_read(&c->wbuf_sem);
882 if (jffs2_cleanmarker_oob(c))
883 ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo);
884 else
885 ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
886
887 if ( (ret == -EBADMSG) && (*retlen == len) ) {
888 printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
889 len, ofs);
890 /*
891 * We have the raw data without ECC correction in the buffer, maybe
892 * we are lucky and all data or parts are correct. We check the node.
893 * If data are corrupted, the node check will sort it out.
894 * We keep this block; it will fail on write or erase and then we
895 * mark it bad. Or should we do that now? But we should give it a chance.
896 * Maybe we had a system crash or power loss before the ECC write or
897 * an erase was completed.
898 * So we return success. :)
899 */
900 ret = 0;
901 }
902
903 /* if no writebuffer available or write buffer empty, return */
904 if (!c->wbuf_pagesize || !c->wbuf_len)
905 goto exit;
906
907 /* if we read in a different block, return */
908 if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
909 goto exit;
910
911 if (ofs >= c->wbuf_ofs) {
912 owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */
913 if (owbf > c->wbuf_len) /* is read beyond write buffer ? */
914 goto exit;
915 lwbf = c->wbuf_len - owbf; /* number of bytes to copy */
916 if (lwbf > len)
917 lwbf = len;
918 } else {
919 orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */
920 if (orbf > len) /* is write beyond write buffer ? */
921 goto exit;
922 lwbf = len - orbf; /* number of bytes to copy */
923 if (lwbf > c->wbuf_len)
924 lwbf = c->wbuf_len;
925 }
926 if (lwbf > 0)
927 memcpy(buf+orbf,c->wbuf+owbf,lwbf);
928
929exit:
930 up_read(&c->wbuf_sem);
931 return ret;
932}
933
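The owbf/orbf/lwbf arithmetic above patches data still held in the write buffer into a read that overlaps it. A hedged standalone rendering of the same calculation; the offsets and sizes are invented, and only the overlap bookkeeping is shown.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wbuf_ofs = 0x1000, wbuf_len = 128;	/* wbuf covers 0x1000..0x107f */
	uint32_t ofs = 0x0fc0, len = 0x100;		/* read covers 0x0fc0..0x10bf */
	uint32_t orbf = 0, owbf = 0, lwbf;

	if (ofs >= wbuf_ofs) {			/* read starts inside the wbuf */
		owbf = ofs - wbuf_ofs;		/* skip this much of the wbuf */
		lwbf = wbuf_len - owbf;
		if (lwbf > len)
			lwbf = len;
	} else {				/* read starts before the wbuf */
		orbf = wbuf_ofs - ofs;		/* overlap offset in the read buffer */
		lwbf = len - orbf;
		if (lwbf > wbuf_len)
			lwbf = wbuf_len;
	}
	/* memcpy(buf + orbf, wbuf + owbf, lwbf) would then patch the read data. */
	printf("copy %u wbuf bytes to read-buffer offset %u (wbuf offset %u)\n",
	       lwbf, orbf, owbf);
	return 0;
}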
934/*
935 * Check, if the out of band area is empty
936 */
937int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
938{
939 unsigned char *buf;
940 int ret = 0;
941 int i,len,page;
942 size_t retlen;
943 int oob_size;
944
945 /* allocate a buffer for all oob data in this sector */
946 oob_size = c->mtd->oobsize;
947 len = 4 * oob_size;
948 buf = kmalloc(len, GFP_KERNEL);
949 if (!buf) {
950 printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
951 return -ENOMEM;
952 }
953 /*
954 * if mode = 0, we scan for a total empty oob area, else we have
955 * to take care of the cleanmarker in the first page of the block
956 */
957 ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf);
958 if (ret) {
959 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
960 goto out;
961 }
962
963 if (retlen < len) {
964 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read "
965 "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
966 ret = -EIO;
967 goto out;
968 }
969
970 /* Special check for first page */
971 for(i = 0; i < oob_size ; i++) {
972 /* Yeah, we know about the cleanmarker. */
973 if (mode && i >= c->fsdata_pos &&
974 i < c->fsdata_pos + c->fsdata_len)
975 continue;
976
977 if (buf[i] != 0xFF) {
978 D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
979 buf[i], i, jeb->offset));
980 ret = 1;
981 goto out;
982 }
983 }
984
985 /* we know, we are aligned :) */
986 for (page = oob_size; page < len; page += sizeof(long)) {
987 unsigned long dat = *(unsigned long *)(&buf[page]);
988 if(dat != -1) {
989 ret = 1;
990 goto out;
991 }
992 }
993
994out:
995 kfree(buf);
996
997 return ret;
998}
999
1000/*
1001* Scan for a valid cleanmarker and for bad blocks
1002* For virtual blocks (concatenated physical blocks) check the cleanmarker
1003* only in the first page of the first physical block, but scan for bad blocks in all
1004* physical blocks
1005*/
1006int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
1007{
1008 struct jffs2_unknown_node n;
1009 unsigned char buf[2 * NAND_MAX_OOBSIZE];
1010 unsigned char *p;
1011 int ret, i, cnt, retval = 0;
1012 size_t retlen, offset;
1013 int oob_size;
1014
1015 offset = jeb->offset;
1016 oob_size = c->mtd->oobsize;
1017
1018 /* Loop through the physical blocks */
1019 for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
1020 /* Check first if the block is bad. */
1021 if (c->mtd->block_isbad (c->mtd, offset)) {
1022 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
1023 return 2;
1024 }
1025 /*
1026 * We read oob data from page 0 and 1 of the block.
1027 * page 0 contains cleanmarker and badblock info
1028 * page 1 contains failure count of this block
1029 */
1030 ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);
1031
1032 if (ret) {
1033 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
1034 return ret;
1035 }
1036 if (retlen < (oob_size << 1)) {
1037 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
1038 return -EIO;
1039 }
1040
1041 /* Check cleanmarker only on the first physical block */
1042 if (!cnt) {
1043 n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
1044 n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
1045 n.totlen = cpu_to_je32 (8);
1046 p = (unsigned char *) &n;
1047
1048 for (i = 0; i < c->fsdata_len; i++) {
1049 if (buf[c->fsdata_pos + i] != p[i]) {
1050 retval = 1;
1051 }
1052 }
1053 D1(if (retval == 1) {
1054 printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
1055 printk(KERN_WARNING "OOB at %08x was ", offset);
1056 for (i=0; i < oob_size; i++) {
1057 printk("%02x ", buf[i]);
1058 }
1059 printk("\n");
1060 })
1061 }
1062 offset += c->mtd->erasesize;
1063 }
1064 return retval;
1065}
1066
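A hedged sketch of the comparison jffs2_check_nand_cleanmarker() performs on the first physical block: build the expected 8-byte cleanmarker header and compare it byte-for-byte against the fsdata region of the OOB data. The node constants match JFFS2's on-flash values, but the struct is a simplified native-endian stand-in and the OOB layout below is invented.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct marker {			/* stand-in for struct jffs2_unknown_node */
	uint16_t magic;		/* JFFS2_MAGIC_BITMASK, 0x1985 */
	uint16_t nodetype;	/* JFFS2_NODETYPE_CLEANMARKER, 0x2003 */
	uint32_t totlen;	/* 8 */
};

int main(void)
{
	uint8_t oob[16];				/* pretend OOB data from page 0 */
	uint32_t fsdata_pos = 6, fsdata_len = 8;	/* example OOB layout */
	struct marker expect = { 0x1985, 0x2003, 8 };

	/* Fake a freshly erased-and-marked block for the demo. */
	memset(oob, 0xff, sizeof(oob));
	memcpy(oob + fsdata_pos, &expect, fsdata_len);

	/* The real check: any mismatching byte means "no cleanmarker here". */
	int missing = memcmp(oob + fsdata_pos, &expect, fsdata_len) != 0;
	printf("cleanmarker %s\n", missing ? "not found" : "present");
	return 0;
}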
1067int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
1068{
1069 struct jffs2_unknown_node n;
1070 int ret;
1071 size_t retlen;
1072
1073 n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
1074 n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
1075 n.totlen = cpu_to_je32(8);
1076
1077 ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);
1078
1079 if (ret) {
1080 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1081 return ret;
1082 }
1083 if (retlen != c->fsdata_len) {
1084 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
1085 return ret;
1086 }
1087 return 0;
1088}
1089
1090/*
1091 * On NAND we try to mark this block bad. If the block was erased more
1092 * than MAX_ERASE_FAILURES we mark it finally bad.
1093 * Don't care about failures. This block remains on the erase-pending
1094 * or badblock list as long as nobody manipulates the flash with
1095 * a bootloader or something like that.
1096 */
1097
1098int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1099{
1100 int ret;
1101
1102 /* if the count is < max, we try to write the counter to the 2nd page oob area */
1103 if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1104 return 0;
1105
1106 if (!c->mtd->block_markbad)
1107 return 1; // What else can we do?
1108
1109 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
1110 ret = c->mtd->block_markbad(c->mtd, bad_offset);
1111
1112 if (ret) {
1113 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1114 return ret;
1115 }
1116 return 1;
1117}
1118
1119#define NAND_JFFS2_OOB16_FSDALEN 8
1120
1121static struct nand_oobinfo jffs2_oobinfo_docecc = {
1122 .useecc = MTD_NANDECC_PLACE,
1123 .eccbytes = 6,
1124 .eccpos = {0,1,2,3,4,5}
1125};
1126
1127
1128static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
1129{
1130 struct nand_oobinfo *oinfo = &c->mtd->oobinfo;
1131
1132 /* Do this only, if we have an oob buffer */
1133 if (!c->mtd->oobsize)
1134 return 0;
1135
1136 /* Cleanmarker is out-of-band, so inline size zero */
1137 c->cleanmarker_size = 0;
1138
1139 /* Should we use autoplacement ? */
1140 if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
1141 D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
1142 /* Get the position of the free bytes */
1143 if (!oinfo->oobfree[0][1]) {
1144 printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
1145 return -ENOSPC;
1146 }
1147 c->fsdata_pos = oinfo->oobfree[0][0];
1148 c->fsdata_len = oinfo->oobfree[0][1];
1149 if (c->fsdata_len > 8)
1150 c->fsdata_len = 8;
1151 } else {
1152 /* This is just a legacy fallback and should go away soon */
1153 switch(c->mtd->ecctype) {
1154 case MTD_ECC_RS_DiskOnChip:
1155 printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
1156 c->oobinfo = &jffs2_oobinfo_docecc;
1157 c->fsdata_pos = 6;
1158 c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
1159 c->badblock_pos = 15;
1160 break;
1161
1162 default:
1163 D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
1164 return -EINVAL;
1165 }
1166 }
1167 return 0;
1168}
1169
1170int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1171{
1172 int res;
1173
1174 /* Initialise write buffer */
1175 init_rwsem(&c->wbuf_sem);
1176 c->wbuf_pagesize = c->mtd->writesize;
1177 c->wbuf_ofs = 0xFFFFFFFF;
1178
1179 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1180 if (!c->wbuf)
1181 return -ENOMEM;
1182
1183 res = jffs2_nand_set_oobinfo(c);
1184
1185#ifdef BREAKME
1186 if (!brokenbuf)
1187 brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1188 if (!brokenbuf) {
1189 kfree(c->wbuf);
1190 return -ENOMEM;
1191 }
1192 memset(brokenbuf, 0xdb, c->wbuf_pagesize);
1193#endif
1194 return res;
1195}
1196
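This function is where the patch in this listing lands: the write-buffer is sized from the new mtd->writesize field (the smallest programmable unit, previously called oobblock) instead of a hard-coded page size. A hedged sketch of the geometry involved, with invented values for a typical large-page NAND:

#include <stdint.h>
#include <stdio.h>

/* Toy view of the MTD fields the setup functions in this file consume
 * (in the kernel these live in struct mtd_info). */
struct mtd_geom {
	uint32_t erasesize;	/* eraseblock size */
	uint32_t writesize;	/* smallest programmable unit */
	uint32_t oobsize;	/* out-of-band bytes per page */
};

int main(void)
{
	struct mtd_geom mtd = { 128 * 1024, 2048, 64 };	/* 2KiB-page NAND */

	/* As in jffs2_nand_flash_setup(): one write-buffer per NAND page. */
	uint32_t wbuf_pagesize = mtd.writesize;
	uint32_t pages_per_block = mtd.erasesize / mtd.writesize;

	printf("wbuf %u bytes, %u pages per eraseblock, %u OOB bytes per page\n",
	       wbuf_pagesize, pages_per_block, mtd.oobsize);
	return 0;
}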
1197void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1198{
1199 kfree(c->wbuf);
1200}
1201
1202int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1203 c->cleanmarker_size = 0; /* No cleanmarkers needed */
1204
1205 /* Initialize write buffer */
1206 init_rwsem(&c->wbuf_sem);
1207
1208
1209 c->wbuf_pagesize = c->mtd->erasesize;
1210
1211 /* Find a suitable c->sector_size
1212 * - Not too much sectors
1213 * - Sectors have to be at least 4 K + some bytes
1214 * - All known dataflashes have erase sizes of 528 or 1056
1215 * - we take at least 8 eraseblocks and want to have at least 8K size
1216 * - The concatenation should be a power of 2
1217 */
1218
1219 c->sector_size = 8 * c->mtd->erasesize;
1220
1221 while (c->sector_size < 8192) {
1222 c->sector_size *= 2;
1223 }
1224
1225 /* It may be necessary to adjust the flash size */
1226 c->flash_size = c->mtd->size;
1227
1228 if ((c->flash_size % c->sector_size) != 0) {
1229 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1230 printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
1231 };
1232
1233 c->wbuf_ofs = 0xFFFFFFFF;
1234 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1235 if (!c->wbuf)
1236 return -ENOMEM;
1237
1238 printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
1239
1240 return 0;
1241}
1242
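Worked through for a typical 528-byte-page DataFlash, the sizing rules above give: wbuf_pagesize = erasesize = 528, a starting sector_size of 8 x 528 = 4224, which the loop doubles to 8448 (16 erase pages, a power of two) to clear the 8KiB minimum; any flash size that is not a whole number of such sectors is then trimmed down. A standalone version of that arithmetic, with an invented flash size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t erasesize = 528;			/* typical AT45 DataFlash page */
	uint32_t flash_size = 4 * 1024 * 1024 + 1000;	/* deliberately not a multiple */

	uint32_t wbuf_pagesize = erasesize;		/* buffer one whole page */
	uint32_t sector_size = 8 * erasesize;		/* 4224 */

	while (sector_size < 8192)			/* grow to at least 8KiB */
		sector_size *= 2;			/* 8448 = 16 erase pages */

	if (flash_size % sector_size)			/* trim to whole sectors */
		flash_size = (flash_size / sector_size) * sector_size;

	printf("wbuf %u, virtual sector %u, usable flash %u\n",
	       wbuf_pagesize, sector_size, flash_size);
	return 0;
}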
1243void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1244 kfree(c->wbuf);
1245}
1246
1247int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) {
1248 /* Cleanmarker is actually larger on the flashes */
1249 c->cleanmarker_size = 16;
1250
1251 /* Initialize write buffer */
1252 init_rwsem(&c->wbuf_sem);
1253 c->wbuf_pagesize = c->mtd->eccsize;
1254 c->wbuf_ofs = 0xFFFFFFFF;
1255
1256 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1257 if (!c->wbuf)
1258 return -ENOMEM;
1259
1260 return 0;
1261}
1262
1263void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {
1264 kfree(c->wbuf);
1265}
1266
1267int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1268 /* Cleanmarker currently occupies a whole programming region */
1269 c->cleanmarker_size = c->mtd->writesize;
1270
1271 /* Initialize write buffer */
1272 init_rwsem(&c->wbuf_sem);
1273 c->wbuf_pagesize = c->mtd->writesize;
1274 c->wbuf_ofs = 0xFFFFFFFF;
1275
1276 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1277 if (!c->wbuf)
1278 return -ENOMEM;
1279
1280 return 0;
1281}
1282
1283void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1284 kfree(c->wbuf);
1285}