[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / jffs2 / wbuf.c
1/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2001-2003 Red Hat, Inc.
5 * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
6 *
7 * Created by David Woodhouse <dwmw2@infradead.org>
8 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9 *
10 * For licensing information, see the file 'LICENCE' in this directory.
11 *
12 * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/mtd/mtd.h>
19#include <linux/crc32.h>
20#include <linux/mtd/nand.h>
21#include <linux/jiffies.h>
22#include <linux/sched.h>
23
24#include "nodelist.h"
25
26/* For testing write failures */
27#undef BREAKME
28#undef BREAKMEHEADER
29
30#ifdef BREAKME
31static unsigned char *brokenbuf;
32#endif
33
34#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
35#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
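/* Worked example (hypothetical values): with c->wbuf_pagesize == 512,
   PAGE_DIV(0x1234) == 0x1200, the page-aligned base of the address, and
   PAGE_MOD(0x1234) == 0x34, the offset within that page. */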
36
37/* max. erase failures before we mark a block bad */
38#define MAX_ERASE_FAILURES 2
39
40struct jffs2_inodirty {
41 uint32_t ino;
42 struct jffs2_inodirty *next;
43};
44
45static struct jffs2_inodirty inodirty_nomem;
46
47static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
48{
49 struct jffs2_inodirty *this = c->wbuf_inodes;
50
51 /* If a malloc failed, consider _everything_ dirty */
52 if (this == &inodirty_nomem)
53 return 1;
54
55 /* If ino == 0, _any_ non-GC writes mean 'yes' */
56 if (this && !ino)
57 return 1;
58
59 /* Look to see if the inode in question is pending in the wbuf */
60 while (this) {
61 if (this->ino == ino)
62 return 1;
63 this = this->next;
64 }
65 return 0;
66}
67
68static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
69{
70 struct jffs2_inodirty *this;
71
72 this = c->wbuf_inodes;
73
74 if (this != &inodirty_nomem) {
75 while (this) {
76 struct jffs2_inodirty *next = this->next;
77 kfree(this);
78 this = next;
79 }
80 }
81 c->wbuf_inodes = NULL;
82}
83
84static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
85{
86 struct jffs2_inodirty *new;
87
88 /* Mark the superblock dirty so that kupdated will flush... */
89 jffs2_erase_pending_trigger(c);
90
91 if (jffs2_wbuf_pending_for_ino(c, ino))
92 return;
93
94 new = kmalloc(sizeof(*new), GFP_KERNEL);
95 if (!new) {
96 D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
97 jffs2_clear_wbuf_ino_list(c);
98 c->wbuf_inodes = &inodirty_nomem;
99 return;
100 }
101 new->ino = ino;
102 new->next = c->wbuf_inodes;
103 c->wbuf_inodes = new;
104 return;
105}
106
107static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
108{
109 struct list_head *this, *next;
110 static int n;
111
112 if (list_empty(&c->erasable_pending_wbuf_list))
113 return;
114
115 list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
116 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
117
118 D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
119 list_del(this);
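/* The (jiffies + n) & 127 test below is a cheap pseudo-random choice:
   roughly 127 times in 128 the block goes straight to erase_pending_list,
   and about once in 128 it is parked on erasable_list instead. */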
120 if ((jiffies + (n++)) & 127) {
121 /* Most of the time, we just erase it immediately. Otherwise we
122 spend ages scanning it on mount, etc. */
123 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
124 list_add_tail(&jeb->list, &c->erase_pending_list);
125 c->nr_erasing_blocks++;
126 jffs2_erase_pending_trigger(c);
127 } else {
128 /* Sometimes, however, we leave it elsewhere so it doesn't get
129 immediately reused, and we spread the load a bit. */
130 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
131 list_add_tail(&jeb->list, &c->erasable_list);
132 }
133 }
134}
135
136#define REFILE_NOTEMPTY 0
137#define REFILE_ANYWAY 1
138
139static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
140{
141 D1(printk("About to refile bad block at %08x\n", jeb->offset));
142
143 /* File the existing block on the bad_used_list.... */
144 if (c->nextblock == jeb)
145 c->nextblock = NULL;
146 else /* Not sure this should ever happen... need more coffee */
147 list_del(&jeb->list);
148 if (jeb->first_node) {
149 D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
150 list_add(&jeb->list, &c->bad_used_list);
151 } else {
152 BUG_ON(allow_empty == REFILE_NOTEMPTY);
153 /* It has to have had some nodes or we couldn't be here */
154 D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
155 list_add(&jeb->list, &c->erase_pending_list);
156 c->nr_erasing_blocks++;
157 jffs2_erase_pending_trigger(c);
158 }
159
160 if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
161 uint32_t oldfree = jeb->free_size;
162
163 jffs2_link_node_ref(c, jeb,
164 (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
165 oldfree, NULL);
166 /* convert to wasted */
167 c->wasted_size += oldfree;
168 jeb->wasted_size += oldfree;
169 c->dirty_size -= oldfree;
170 jeb->dirty_size -= oldfree;
171 }
172
173 jffs2_dbg_dump_block_lists_nolock(c);
174 jffs2_dbg_acct_sanity_check_nolock(c,jeb);
175 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
176}
177
178static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
179 struct jffs2_inode_info *f,
180 struct jffs2_raw_node_ref *raw,
181 union jffs2_node_union *node)
182{
183 struct jffs2_node_frag *frag;
184 struct jffs2_full_dirent *fd;
185
186 dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
187 node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
188
189 BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
190 je16_to_cpu(node->u.magic) != 0);
191
192 switch (je16_to_cpu(node->u.nodetype)) {
193 case JFFS2_NODETYPE_INODE:
194 if (f->metadata && f->metadata->raw == raw) {
195 dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
196 return &f->metadata->raw;
197 }
198 frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
199 BUG_ON(!frag);
200 /* Find a frag which refers to the full_dnode we want to modify */
201 while (!frag->node || frag->node->raw != raw) {
202 frag = frag_next(frag);
203 BUG_ON(!frag);
204 }
205 dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
206 return &frag->node->raw;
207
208 case JFFS2_NODETYPE_DIRENT:
209 for (fd = f->dents; fd; fd = fd->next) {
210 if (fd->raw == raw) {
211 dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
212 return &fd->raw;
213 }
214 }
215 BUG();
216
217 default:
218 dbg_noderef("Don't care about replacing raw for nodetype %x\n",
219 je16_to_cpu(node->u.nodetype));
220 break;
221 }
222 return NULL;
223}
224
225/* Recover from failure to write wbuf. Recover the nodes up to the
226 * wbuf, not the one which we were starting to try to write. */
227
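/* Outline of the recovery path below: the failing block is refiled via
   jffs2_block_refile(), the first node overlapping the write buffer is
   located, any of its data already on flash is read back, fresh space is
   reserved with jffs2_reserve_space_gc(), the surviving data is rewritten
   there, and the raw node refs are relinked from the old block to the new. */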
228static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
229{
230 struct jffs2_eraseblock *jeb, *new_jeb;
231 struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
232 size_t retlen;
233 int ret;
234 int nr_refile = 0;
235 unsigned char *buf;
236 uint32_t start, end, ofs, len;
237
238 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
239
240 spin_lock(&c->erase_completion_lock);
241 if (c->wbuf_ofs % c->mtd->erasesize)
242 jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
243 else
244 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
245 spin_unlock(&c->erase_completion_lock);
246
247 BUG_ON(!ref_obsolete(jeb->last_node));
248
249 /* Find the first node to be recovered, by skipping over every
250 node which ends before the wbuf starts, or which is obsolete. */
251 for (next = raw = jeb->first_node; next; raw = next) {
252 next = ref_next(raw);
253
254 if (ref_obsolete(raw) ||
255 (next && ref_offset(next) <= c->wbuf_ofs)) {
256 dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
257 ref_offset(raw), ref_flags(raw),
258 (ref_offset(raw) + ref_totlen(c, jeb, raw)),
259 c->wbuf_ofs);
260 continue;
261 }
262 dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
263 ref_offset(raw), ref_flags(raw),
264 (ref_offset(raw) + ref_totlen(c, jeb, raw)));
265
266 first_raw = raw;
267 break;
268 }
269
270 if (!first_raw) {
271 /* All nodes were obsolete. Nothing to recover. */
272 D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
273 c->wbuf_len = 0;
274 return;
275 }
276
277 start = ref_offset(first_raw);
278 end = ref_offset(jeb->last_node);
279 nr_refile = 1;
280
281 /* Count the number of refs which need to be copied */
282 while ((raw = ref_next(raw)) != jeb->last_node)
283 nr_refile++;
284
285 dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
286 start, end, end - start, nr_refile);
287
288 buf = NULL;
289 if (start < c->wbuf_ofs) {
290 /* First affected node was already partially written.
291 * Attempt to reread the old data into our buffer. */
292
293 buf = kmalloc(end - start, GFP_KERNEL);
294 if (!buf) {
295 printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
296
297 goto read_failed;
298 }
299
300 /* Do the read... */
301 ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
302
303 /* ECC recovered ? */
304 if ((ret == -EUCLEAN || ret == -EBADMSG) &&
305 (retlen == c->wbuf_ofs - start))
306 ret = 0;
307
308 if (ret || retlen != c->wbuf_ofs - start) {
309 printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
310
311 kfree(buf);
312 buf = NULL;
313 read_failed:
314 first_raw = ref_next(first_raw);
315 nr_refile--;
316 while (first_raw && ref_obsolete(first_raw)) {
317 first_raw = ref_next(first_raw);
318 nr_refile--;
319 }
320
321 /* If this was the only node to be recovered, give up */
322 if (!first_raw) {
323 c->wbuf_len = 0;
324 return;
325 }
326
327 /* It wasn't. Go on and try to recover nodes complete in the wbuf */
328 start = ref_offset(first_raw);
329 dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
330 start, end, end - start, nr_refile);
331
332 } else {
333 /* Read succeeded. Copy the remaining data from the wbuf */
334 memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
335 }
336 }
337 /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
338 Either 'buf' contains the data, or we find it in the wbuf */
339
340 /* ... and get an allocation of space from a shiny new block instead */
341 ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
342 if (ret) {
343 printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
344 kfree(buf);
345 return;
346 }
347
348 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
349 if (ret) {
350 printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
351 kfree(buf);
352 return;
353 }
354
355 ofs = write_ofs(c);
356
357 if (end-start >= c->wbuf_pagesize) {
358 /* Need to do another write immediately, but it's possible
359 that this is just because the wbuf itself is completely
360 full, and there's nothing earlier read back from the
361 flash. Hence 'buf' isn't necessarily what we're writing
362 from. */
363 unsigned char *rewrite_buf = buf?:c->wbuf;
364 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
365
366 D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
367 towrite, ofs));
368
369#ifdef BREAKMEHEADER
370 static int breakme;
371 if (breakme++ == 20) {
372 printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
373 breakme = 0;
374 c->mtd->write(c->mtd, ofs, towrite, &retlen,
375 brokenbuf);
376 ret = -EIO;
377 } else
378#endif
379 ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
380 rewrite_buf);
381
382 if (ret || retlen != towrite) {
383 /* Argh. We tried. Really we did. */
384 printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
385 kfree(buf);
386
387 if (retlen)
388 jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
389
390 return;
391 }
392 printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
393
394 c->wbuf_len = (end - start) - towrite;
395 c->wbuf_ofs = ofs + towrite;
396 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
397 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
398 } else {
399 /* OK, now we're left with the dregs in whichever buffer we're using */
400 if (buf) {
401 memcpy(c->wbuf, buf, end-start);
402 } else {
403 memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
404 }
405 c->wbuf_ofs = ofs;
406 c->wbuf_len = end - start;
407 }
408
409 /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
410 new_jeb = &c->blocks[ofs / c->sector_size];
411
412 spin_lock(&c->erase_completion_lock);
413 for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
414 uint32_t rawlen = ref_totlen(c, jeb, raw);
415 struct jffs2_inode_cache *ic;
416 struct jffs2_raw_node_ref *new_ref;
417 struct jffs2_raw_node_ref **adjust_ref = NULL;
418 struct jffs2_inode_info *f = NULL;
419
420 D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
421 rawlen, ref_offset(raw), ref_flags(raw), ofs));
422
423 ic = jffs2_raw_ref_to_ic(raw);
424
425 /* Ick. This XATTR mess should be fixed shortly... */
426 if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
427 struct jffs2_xattr_datum *xd = (void *)ic;
428 BUG_ON(xd->node != raw);
429 adjust_ref = &xd->node;
430 raw->next_in_ino = NULL;
431 ic = NULL;
432 } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
433 struct jffs2_xattr_datum *xr = (void *)ic;
434 BUG_ON(xr->node != raw);
435 adjust_ref = &xr->node;
436 raw->next_in_ino = NULL;
437 ic = NULL;
438 } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
439 struct jffs2_raw_node_ref **p = &ic->nodes;
440
441 /* Remove the old node from the per-inode list */
442 while (*p && *p != (void *)ic) {
443 if (*p == raw) {
444 (*p) = (raw->next_in_ino);
445 raw->next_in_ino = NULL;
446 break;
447 }
448 p = &((*p)->next_in_ino);
449 }
450
451 if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
452 /* If it's an in-core inode, then we have to adjust any
453 full_dirent or full_dnode structure to point to the
454 new version instead of the old */
455 f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
456 if (IS_ERR(f)) {
457 /* Should never happen; it _must_ be present */
458 JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
459 ic->ino, PTR_ERR(f));
460 BUG();
461 }
462 /* We don't lock f->sem. There's a number of ways we could
463 end up in here with it already being locked, and nobody's
464 going to modify it on us anyway because we hold the
465 alloc_sem. We're only changing one ->raw pointer too,
466 which we can get away with without upsetting readers. */
467 adjust_ref = jffs2_incore_replace_raw(c, f, raw,
468 (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
469 } else if (unlikely(ic->state != INO_STATE_PRESENT &&
470 ic->state != INO_STATE_CHECKEDABSENT &&
471 ic->state != INO_STATE_GC)) {
472 JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
473 BUG();
474 }
475 }
476
477 new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
478
479 if (adjust_ref) {
480 BUG_ON(*adjust_ref != raw);
481 *adjust_ref = new_ref;
482 }
483 if (f)
484 jffs2_gc_release_inode(c, f);
485
486 if (!ref_obsolete(raw)) {
487 jeb->dirty_size += rawlen;
488 jeb->used_size -= rawlen;
489 c->dirty_size += rawlen;
490 c->used_size -= rawlen;
491 raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
492 BUG_ON(raw->next_in_ino);
493 }
494 ofs += rawlen;
495 }
496
497 kfree(buf);
498
499 /* Fix up the original jeb now it's on the bad_list */
500 if (first_raw == jeb->first_node) {
501 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
502 list_move(&jeb->list, &c->erase_pending_list);
503 c->nr_erasing_blocks++;
504 jffs2_erase_pending_trigger(c);
505 }
506
507 jffs2_dbg_acct_sanity_check_nolock(c, jeb);
508 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
509
510 jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
511 jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
512
513 spin_unlock(&c->erase_completion_lock);
514
515 D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));
516
517}
518
519/* Meaning of pad argument:
520 0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
521 1: Pad, do not adjust nextblock free_size
522 2: Pad, adjust nextblock free_size
523*/
524#define NOPAD 0
525#define PAD_NOACCOUNT 1
526#define PAD_ACCOUNTING 2
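/* Within this file: jffs2_flash_writev() flushes with NOPAD when the buffer
   fills naturally (and PAD_NOACCOUNT when it must flush before switching to
   a new block), jffs2_flush_wbuf_pad() uses PAD_NOACCOUNT, and
   jffs2_flush_wbuf_gc() falls back to PAD_ACCOUNTING when garbage collection
   cannot empty the buffer. */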
527
528static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
529{
530 struct jffs2_eraseblock *wbuf_jeb;
531 int ret;
532 size_t retlen;
533
534 /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
535 del_timer() the timer we never initialised. */
536 if (!jffs2_is_writebuffered(c))
537 return 0;
538
539 if (!down_trylock(&c->alloc_sem)) {
540 up(&c->alloc_sem);
541 printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
542 BUG();
543 }
544
545 if (!c->wbuf_len) /* already checked c->wbuf above */
546 return 0;
547
548 wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
549 if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
550 return -ENOMEM;
551
552 /* claim remaining space on the page
553 this happens if we change to a new block,
554 or if fsync forces us to flush the writebuffer.
555 if we have a switch to next page, we will not have
556 enough remaining space for this.
557 */
558 if (pad ) {
559 c->wbuf_len = PAD(c->wbuf_len);
560
561 /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
562 with 8 byte page size */
563 memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
564
565 if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
566 struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
567 padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
568 padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
569 padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
570 padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
571 }
572 }
573 /* else jffs2_flash_writev has actually filled in the rest of the
574 buffer for us, and will deal with the node refs etc. later. */
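/* Worked example (hypothetical sizes, assuming PAD() is the usual 4-byte
   round-up): with a 2048-byte wbuf_pagesize and 1498 bytes buffered,
   wbuf_len is padded to 1500, the tail is zeroed, and a
   JFFS2_NODETYPE_PADDING node with totlen == 548 is placed at offset 1500
   so the scanner can skip the padding cleanly on mount. */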
575
576#ifdef BREAKME
577 static int breakme;
578 if (breakme++ == 20) {
579 printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
580 breakme = 0;
581 c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
582 brokenbuf);
583 ret = -EIO;
584 } else
585#endif
586
587 ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
588
589 if (ret || retlen != c->wbuf_pagesize) {
590 if (ret)
591 printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
592 else {
593 printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
594 retlen, c->wbuf_pagesize);
595 ret = -EIO;
596 }
597
598 jffs2_wbuf_recover(c);
599
600 return ret;
601 }
602
603 /* Adjust free size of the block if we padded. */
604 if (pad) {
605 uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
606
607 D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
608 (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));
609
610 /* wbuf_pagesize - wbuf_len is the amount of space that's to be
611 padded. If there is less free space in the block than that,
612 something screwed up */
613 if (wbuf_jeb->free_size < waste) {
614 printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
615 c->wbuf_ofs, c->wbuf_len, waste);
616 printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
617 wbuf_jeb->offset, wbuf_jeb->free_size);
618 BUG();
619 }
620
621 spin_lock(&c->erase_completion_lock);
622
623 jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
624 /* FIXME: that made it count as dirty. Convert to wasted */
625 wbuf_jeb->dirty_size -= waste;
626 c->dirty_size -= waste;
627 wbuf_jeb->wasted_size += waste;
628 c->wasted_size += waste;
629 } else
630 spin_lock(&c->erase_completion_lock);
631
632 /* Stick any now-obsoleted blocks on the erase_pending_list */
633 jffs2_refile_wbuf_blocks(c);
634 jffs2_clear_wbuf_ino_list(c);
635 spin_unlock(&c->erase_completion_lock);
636
637 memset(c->wbuf,0xff,c->wbuf_pagesize);
638 /* adjust write buffer offset, else we get a non contiguous write bug */
639 c->wbuf_ofs += c->wbuf_pagesize;
640 c->wbuf_len = 0;
641 return 0;
642}
643
644/* Trigger garbage collection to flush the write-buffer.
645 If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
646 outstanding. If ino arg non-zero, do it only if a write for the
647 given inode is outstanding. */
648int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
649{
650 uint32_t old_wbuf_ofs;
651 uint32_t old_wbuf_len;
652 int ret = 0;
653
654 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
655
656 if (!c->wbuf)
657 return 0;
658
659 down(&c->alloc_sem);
660 if (!jffs2_wbuf_pending_for_ino(c, ino)) {
661 D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
662 up(&c->alloc_sem);
663 return 0;
664 }
665
666 old_wbuf_ofs = c->wbuf_ofs;
667 old_wbuf_len = c->wbuf_len;
668
669 if (c->unchecked_size) {
670 /* GC won't make any progress for a while */
671 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
672 down_write(&c->wbuf_sem);
673 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
674 /* retry flushing wbuf in case jffs2_wbuf_recover
675 left some data in the wbuf */
676 if (ret)
677 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
678 up_write(&c->wbuf_sem);
679 } else while (old_wbuf_len &&
680 old_wbuf_ofs == c->wbuf_ofs) {
681
682 up(&c->alloc_sem);
683
684 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
685
686 ret = jffs2_garbage_collect_pass(c);
687 if (ret) {
688 /* GC failed. Flush it with padding instead */
689 down(&c->alloc_sem);
690 down_write(&c->wbuf_sem);
691 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
692 /* retry flushing wbuf in case jffs2_wbuf_recover
693 left some data in the wbuf */
694 if (ret)
695 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
696 up_write(&c->wbuf_sem);
697 break;
698 }
699 down(&c->alloc_sem);
700 }
701
702 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
703
704 up(&c->alloc_sem);
705 return ret;
706}
707
708/* Pad write-buffer to end and write it, wasting space. */
709int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
710{
711 int ret;
712
713 if (!c->wbuf)
714 return 0;
715
716 down_write(&c->wbuf_sem);
717 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
718 /* retry - maybe wbuf recover left some data in wbuf. */
719 if (ret)
720 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
721 up_write(&c->wbuf_sem);
722
723 return ret;
724}
725
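/* Helper for jffs2_flash_writev() below: copies as much of 'buf' as fits
   into the write buffer and returns the number of bytes consumed. The
   early return of 0 lets the caller write a whole, page-sized chunk
   directly to flash when the wbuf is empty and the vector already spans
   at least one full page. */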
726static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
727 size_t len)
728{
729 if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
730 return 0;
731
732 if (len > (c->wbuf_pagesize - c->wbuf_len))
733 len = c->wbuf_pagesize - c->wbuf_len;
734 memcpy(c->wbuf + c->wbuf_len, buf, len);
735 c->wbuf_len += (uint32_t) len;
736 return len;
737}
738
739int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
740 unsigned long count, loff_t to, size_t *retlen,
741 uint32_t ino)
742{
743 struct jffs2_eraseblock *jeb;
744 size_t wbuf_retlen, donelen = 0;
745 uint32_t outvec_to = to;
746 int ret, invec;
747
748 /* If not writebuffered flash, don't bother */
749 if (!jffs2_is_writebuffered(c))
750 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
751
752 down_write(&c->wbuf_sem);
753
754 /* If wbuf_ofs is not initialized, set it to target address */
755 if (c->wbuf_ofs == 0xFFFFFFFF) {
756 c->wbuf_ofs = PAGE_DIV(to);
757 c->wbuf_len = PAGE_MOD(to);
758 memset(c->wbuf,0xff,c->wbuf_pagesize);
759 }
760
761 /*
762 * Sanity checks on target address. It's permitted to write
763 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
764 * write at the beginning of a new erase block. Anything else,
765 * and you die. New block starts at xxx000c (0-b = block
766 * header)
767 */
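/* Example of the rule above (hypothetical numbers): with wbuf_ofs at
   0x10000 and wbuf_len 0x44, a write must land at 0x10044, i.e. at
   PAD(wbuf_ofs + wbuf_len), unless it targets a fresh eraseblock just past
   the block header noted above; anything else hits the BUG() below. */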
768 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
769 /* It's a write to a new block */
770 if (c->wbuf_len) {
771 D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
772 "causes flush of wbuf at 0x%08x\n",
773 (unsigned long)to, c->wbuf_ofs));
774 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
775 if (ret)
776 goto outerr;
777 }
778 /* set pointer to new block */
779 c->wbuf_ofs = PAGE_DIV(to);
780 c->wbuf_len = PAGE_MOD(to);
781 }
782
783 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
784 /* We're not writing immediately after the writebuffer. Bad. */
785 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
786 "to %08lx\n", (unsigned long)to);
787 if (c->wbuf_len)
788 printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
789 c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
790 BUG();
791 }
792
793 /* adjust alignment offset */
794 if (c->wbuf_len != PAGE_MOD(to)) {
795 c->wbuf_len = PAGE_MOD(to);
796 /* take care of alignment to next page */
797 if (!c->wbuf_len) {
798 c->wbuf_len = c->wbuf_pagesize;
799 ret = __jffs2_flush_wbuf(c, NOPAD);
800 if (ret)
801 goto outerr;
802 }
803 }
804
805 for (invec = 0; invec < count; invec++) {
806 int vlen = invecs[invec].iov_len;
807 uint8_t *v = invecs[invec].iov_base;
808
809 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
810
811 if (c->wbuf_len == c->wbuf_pagesize) {
812 ret = __jffs2_flush_wbuf(c, NOPAD);
813 if (ret)
814 goto outerr;
815 }
816 vlen -= wbuf_retlen;
817 outvec_to += wbuf_retlen;
818 donelen += wbuf_retlen;
819 v += wbuf_retlen;
820
821 if (vlen >= c->wbuf_pagesize) {
822 ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
823 &wbuf_retlen, v);
824 if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
825 goto outfile;
826
827 vlen -= wbuf_retlen;
828 outvec_to += wbuf_retlen;
829 c->wbuf_ofs = outvec_to;
830 donelen += wbuf_retlen;
831 v += wbuf_retlen;
832 }
833
834 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
835 if (c->wbuf_len == c->wbuf_pagesize) {
836 ret = __jffs2_flush_wbuf(c, NOPAD);
837 if (ret)
838 goto outerr;
839 }
840
841 outvec_to += wbuf_retlen;
842 donelen += wbuf_retlen;
843 }
844
845 /*
846 * If there's a remainder in the wbuf and it's a non-GC write,
847 * remember that the wbuf affects this ino
848 */
849 *retlen = donelen;
850
851 if (jffs2_sum_active()) {
852 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
853 if (res)
854 return res;
855 }
856
857 if (c->wbuf_len && ino)
858 jffs2_wbuf_dirties_inode(c, ino);
859
860 ret = 0;
861 up_write(&c->wbuf_sem);
862 return ret;
863
864outfile:
865 /*
866 * At this point we have no problem, c->wbuf is empty. However
867 * refile nextblock to avoid writing again to same address.
868 */
869
870 spin_lock(&c->erase_completion_lock);
871
872 jeb = &c->blocks[outvec_to / c->sector_size];
873 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
874
875 spin_unlock(&c->erase_completion_lock);
876
877outerr:
878 *retlen = 0;
879 up_write(&c->wbuf_sem);
880 return ret;
881}
882
883/*
884 * This is the entry for flash write.
885 * Check if we work on NAND flash; if so, build a kvec and write it via writev
886*/
887int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
888 size_t *retlen, const u_char *buf)
889{
890 struct kvec vecs[1];
891
892 if (!jffs2_is_writebuffered(c))
893 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
894
895 vecs[0].iov_base = (unsigned char *) buf;
896 vecs[0].iov_len = len;
897 return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
898}
899
900/*
901 Handle readback from writebuffer and ECC failure return
902*/
903int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
904{
905 loff_t orbf = 0, owbf = 0, lwbf = 0;
906 int ret;
907
908 if (!jffs2_is_writebuffered(c))
909 return c->mtd->read(c->mtd, ofs, len, retlen, buf);
910
911 /* Read flash */
912 down_read(&c->wbuf_sem);
913 ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
914
915 if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
916 if (ret == -EBADMSG)
917 printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)"
918 " returned ECC error\n", len, ofs);
919 /*
920 * We have the raw data without ECC correction in the buffer,
921 * maybe we are lucky and all data or parts are correct. We
922 * check the node. If data are corrupted node check will sort
923 * it out. We keep this block, it will fail on write or erase
924 * and then we mark it bad. Or should we do that now? But we
925 * should give it a chance. Maybe we had a system crash or
926 * power loss before the ECC write or an erase was completed.
927 * So we return success. :)
928 */
929 ret = 0;
930 }
931
932 /* if no writebuffer available or write buffer empty, return */
933 if (!c->wbuf_pagesize || !c->wbuf_len)
934 goto exit;
935
936 /* if we read in a different block, return */
937 if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
938 goto exit;
939
940 if (ofs >= c->wbuf_ofs) {
941 owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */
942 if (owbf > c->wbuf_len) /* is read beyond write buffer ? */
943 goto exit;
944 lwbf = c->wbuf_len - owbf; /* number of bytes to copy */
945 if (lwbf > len)
946 lwbf = len;
947 } else {
948 orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */
949 if (orbf > len) /* is write beyond write buffer ? */
950 goto exit;
951 lwbf = len - orbf; /* number of bytes to copy */
952 if (lwbf > c->wbuf_len)
953 lwbf = c->wbuf_len;
954 }
955 if (lwbf > 0)
956 memcpy(buf+orbf,c->wbuf+owbf,lwbf);
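/* Worked example (hypothetical numbers) for the else branch above: with
   wbuf_ofs == 0x1000, wbuf_len == 0x80 and a read of 0x100 bytes from
   ofs == 0xF80 in the same eraseblock, orbf == 0x80 and lwbf == 0x80, so
   the last 0x80 bytes of the caller's buffer are overwritten with the
   not-yet-flushed data from c->wbuf. */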
957
958exit:
959 up_read(&c->wbuf_sem);
960 return ret;
961}
962
963#define NR_OOB_SCAN_PAGES 4
964
965/* For historical reasons we use only 12 bytes for OOB clean marker */
966#define OOB_CM_SIZE 12
967
968static const struct jffs2_unknown_node oob_cleanmarker =
969{
970 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
971 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
972 .totlen = cpu_to_je32(8)
973};
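/* The cleanmarker compared against OOB below occupies at most OOB_CM_SIZE
   (12) bytes: a 2-byte magic (0x1985), a 2-byte nodetype
   (JFFS2_NODETYPE_CLEANMARKER), a 4-byte totlen of 8, and a 4-byte hdr_crc
   left as zero here; byte order depends on how the filesystem was built. */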
974
975 /*
976 * Check if the out-of-band area is empty. This function knows about the clean
977 * marker; if it is present in the OOB, the OOB is treated as empty anyway.
978 */
979int jffs2_check_oob_empty(struct jffs2_sb_info *c,
980 struct jffs2_eraseblock *jeb, int mode)
981{
982 int i, ret;
983 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
984 struct mtd_oob_ops ops;
985
986 ops.mode = MTD_OOB_AUTO;
987 ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
988 ops.oobbuf = c->oobbuf;
989 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
990 ops.datbuf = NULL;
991
992 ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
993 if (ret || ops.oobretlen != ops.ooblen) {
994 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
995 " bytes, read %zd bytes, error %d\n",
996 jeb->offset, ops.ooblen, ops.oobretlen, ret);
997 if (!ret)
998 ret = -EIO;
999 return ret;
1000 }
1001
1002 for(i = 0; i < ops.ooblen; i++) {
1003 if (mode && i < cmlen)
1004 /* Yeah, we know about the cleanmarker */
1005 continue;
1006
1007 if (ops.oobbuf[i] != 0xFF) {
1008 D2(printk(KERN_DEBUG "Found %02x at %x in OOB for "
1009 "%08x\n", ops.oobbuf[i], i, jeb->offset));
1010 return 1;
1011 }
1012 }
1013
1014 return 0;
1015}
1016
1017/*
a7a6ace1
AB
1018 * Check for a valid cleanmarker.
1019 * Returns: 0 if a valid cleanmarker was found
1020 * 1 if no cleanmarker was found
1021 * negative error code if an error occurred
1022 */
1023int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1024 struct jffs2_eraseblock *jeb)
1025{
1026 struct mtd_oob_ops ops;
1027 int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1028
1029 ops.mode = MTD_OOB_AUTO;
1030 ops.ooblen = cmlen;
1031 ops.oobbuf = c->oobbuf;
1032 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1033 ops.datbuf = NULL;
1034
1035 ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
1036 if (ret || ops.oobretlen != ops.ooblen) {
1037 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
1038 " bytes, read %zd bytes, error %d\n",
1039 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1040 if (!ret)
1041 ret = -EIO;
1042 return ret;
1043 }
1044
1045 return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
1046}
1047
1048int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1049 struct jffs2_eraseblock *jeb)
1050{
1051 int ret;
1052 struct mtd_oob_ops ops;
1053 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1054
1055 ops.mode = MTD_OOB_AUTO;
1056 ops.ooblen = cmlen;
1057 ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1058 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1059 ops.datbuf = NULL;
1060
1061 ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops);
1062 if (ret || ops.oobretlen != ops.ooblen) {
1063 printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd"
1064 " bytes, read %zd bytes, error %d\n",
1065 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1066 if (!ret)
1067 ret = -EIO;
1068 return ret;
1069 }
1070
1071 return 0;
1072}
1073
1074/*
1075 * On NAND we try to mark this block bad. If the block was erased more
1076 * than MAX_ERASE_FAILURES we mark it finally bad.
1077 * Don't care about failures. This block remains on the erase-pending
1078 * or badblock list as long as nobody manipulates the flash with
1079 * a bootloader or something like that.
1080 */
1081
1082int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1083{
1084 int ret;
1085
1086 /* if the count is < max, we try to write the counter to the 2nd page oob area */
1087 if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1088 return 0;
1089
1090 if (!c->mtd->block_markbad)
1091 return 1; // What else can we do?
1092
1093 printk(KERN_WARNING "JFFS2: marking eraseblock at %08x as bad\n", bad_offset);
1da177e4 1094 ret = c->mtd->block_markbad(c->mtd, bad_offset);
1095
1096 if (ret) {
1097 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1098 return ret;
1099 }
1100 return 1;
1101}
1102
1103int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1104{
1105 struct nand_ecclayout *oinfo = c->mtd->ecclayout;
1106
1107 if (!c->mtd->oobsize)
1108 return 0;
1109
1110 /* Cleanmarker is out-of-band, so inline size zero */
1111 c->cleanmarker_size = 0;
1112
1113 if (!oinfo || oinfo->oobavail == 0) {
1114 printk(KERN_ERR "inconsistent device description\n");
1115 return -EINVAL;
1116 }
1117
1118 D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n"));
1119
1120 c->oobavail = oinfo->oobavail;
1121
1122 /* Initialise write buffer */
1123 init_rwsem(&c->wbuf_sem);
1124 c->wbuf_pagesize = c->mtd->writesize;
1125 c->wbuf_ofs = 0xFFFFFFFF;
1126
1127 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1128 if (!c->wbuf)
1129 return -ENOMEM;
1130
1131 c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1132 if (!c->oobbuf) {
1133 kfree(c->wbuf);
1134 return -ENOMEM;
1135 }
1136
1137 return 0;
1138}
1139
1140void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1141{
1142 kfree(c->wbuf);
1143 kfree(c->oobbuf);
1144}
1145
1146int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1147 c->cleanmarker_size = 0; /* No cleanmarkers needed */
1148
1149 /* Initialize write buffer */
1150 init_rwsem(&c->wbuf_sem);
1151
1152
1153 c->wbuf_pagesize = c->mtd->erasesize;
1154
1155 /* Find a suitable c->sector_size
1156 * - Not too many sectors
1157 * - Sectors have to be at least 4 K + some bytes
1158 * - All known dataflashes have erase sizes of 528 or 1056
1159 * - we take at least 8 eraseblocks and want to have at least 8K size
1160 * - The concatenation should be a power of 2
1161 */
1162
1163 c->sector_size = 8 * c->mtd->erasesize;
1164
1165 while (c->sector_size < 8192) {
1166 c->sector_size *= 2;
1167 }
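/* Worked example: a DataFlash with a 528-byte erasesize starts at
   8 * 528 = 4224, which the loop above doubles once to 8448 (>= 8192);
   a 1056-byte erasesize gives 8 * 1056 = 8448 directly. */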
1168
1169 /* It may be necessary to adjust the flash size */
1170 c->flash_size = c->mtd->size;
1171
1172 if ((c->flash_size % c->sector_size) != 0) {
1173 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1174 printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size / 1024);
1175 }
1176
1177 c->wbuf_ofs = 0xFFFFFFFF;
1178 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1179 if (!c->wbuf)
1180 return -ENOMEM;
1181
1182 printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
1183
1184 return 0;
1185}
1186
1187void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1188 kfree(c->wbuf);
1189}
1190
1191int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1192 /* Cleanmarker currently occupies whole programming regions,
1193 * either one or 2 for 8Byte STMicro flashes. */
1194 c->cleanmarker_size = max(16u, c->mtd->writesize);
1195
1196 /* Initialize write buffer */
1197 init_rwsem(&c->wbuf_sem);
1198 c->wbuf_pagesize = c->mtd->writesize;
1199 c->wbuf_ofs = 0xFFFFFFFF;
1200
1201 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1202 if (!c->wbuf)
1203 return -ENOMEM;
1204
1205 return 0;
1206}
1207
1208void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1209 kfree(c->wbuf);
1210}