/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	union jffs2_device_node dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
		mdata = (char *)&dev;
		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
			  __func__, mdatalen);
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
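	/* The header CRC covers only the common node header, minus the CRC field itself */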
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
		from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
		from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);


	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_uid_write(inode, je16_to_cpu(ri->uid));
	i_gid_write(inode, je16_to_cpu(ri->gid));


	old_metadata = f->metadata;

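	/* If the size is shrinking, prune the in-core fragment tree beyond the new size now */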
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

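	/* Extending: the hole node written above becomes part of the fragment
	   tree, so it must not be kept as f->metadata */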
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in readpage().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
		inode->i_blocks = (inode->i_size + 511) >> 9;
	}

	return 0;
}

int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	int rc;

	rc = inode_change_ok(dentry->d_inode, iattr);
	if (rc)
		return rc;

	rc = jffs2_do_setattr(dentry->d_inode, iattr);
	if (!rc && (iattr->ia_valid & ATTR_MODE))
		rc = jffs2_acl_chmod(dentry->d_inode);

	return rc;
}

int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
	unsigned long avail;

	buf->f_type = JFFS2_SUPER_MAGIC;
	buf->f_bsize = 1 << PAGE_SHIFT;
	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
	buf->f_files = 0;
	buf->f_ffree = 0;
	buf->f_namelen = JFFS2_MAX_NAME_LEN;
	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
	buf->f_fsid.val[1] = c->mtd->index;

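	/* Don't report the space kept back for GC and deletion as available */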
	spin_lock(&c->erase_completion_lock);
	avail = c->dirty_size + c->free_size;
	if (avail > c->sector_size * c->resv_blocks_write)
		avail -= c->sector_size * c->resv_blocks_write;
	else
		avail = 0;
	spin_unlock(&c->erase_completion_lock);

	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

	return 0;
}


void jffs2_evict_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 * the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
		  __func__, inode->i_ino, inode->i_mode);
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
	jffs2_do_clear_inode(c, f);
}

struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	union jffs2_device_node jdev;
	struct inode *inode;
	dev_t rdev = 0;
	int ret;

	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);

	if (ret) {
		mutex_unlock(&f->sem);
		iget_failed(inode);
		return ERR_PTR(ret);
	}
	inode->i_mode = jemode_to_cpu(latest_node.mode);
	i_uid_write(inode, je16_to_cpu(latest_node.uid));
	i_gid_write(inode, je16_to_cpu(latest_node.gid));
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

	set_nlink(inode, f->inocache->pino_nlink);

	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;
		set_nlink(inode, 2); /* parent and '.' */

		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inc_nlink(inode);
		}
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inc_nlink(inode);

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		if (f->metadata->size != sizeof(jdev.old_id) &&
		    f->metadata->size != sizeof(jdev.new_id)) {
			pr_notice("Device node has strange size %d\n",
				  f->metadata->size);
			goto error_io;
		}
		jffs2_dbg(1, "Reading device numbers from flash\n");
		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
		if (ret < 0) {
			/* Eep */
			pr_notice("Read device numbers for inode %lu failed\n",
				  (unsigned long)inode->i_ino);
			goto error;
		}
		if (f->metadata->size == sizeof(jdev.old_id))
			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
		else
			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));

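		/* fall through: block/char devices share the special-inode setup below */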
	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;

	default:
		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
			__func__, inode->i_mode, (unsigned long)inode->i_ino);
	}

	mutex_unlock(&f->sem);

	jffs2_dbg(1, "jffs2_read_inode() returning\n");
	unlock_new_inode(inode);
	return inode;

error_io:
	ret = -EIO;
error:
	mutex_unlock(&f->sem);
	jffs2_do_clear_inode(c, f);
	iget_failed(inode);
	return ERR_PTR(ret);
}

void jffs2_dirty_inode(struct inode *inode, int flags)
{
	struct iattr iattr;

	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
			  __func__, inode->i_ino);
		return;
	}

	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
		  __func__, inode->i_ino);

	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
	iattr.ia_mode = inode->i_mode;
	iattr.ia_uid = inode->i_uid;
	iattr.ia_gid = inode->i_gid;
	iattr.ia_atime = inode->i_atime;
	iattr.ia_mtime = inode->i_mtime;
	iattr.ia_ctime = inode->i_ctime;

	jffs2_do_setattr(inode, &iattr);
}

int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!(sb->s_flags & MS_RDONLY)) {
		jffs2_stop_garbage_collect_thread(c);
		mutex_lock(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		mutex_unlock(&c->alloc_sem);
	}

	if (!(*flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	*flags |= MS_NOATIME;
	return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
		  __func__, dir_i->i_ino, mode);

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));

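	/* Inherit the group from a setgid parent directory; new directories
	   keep the setgid bit as well */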
	if (dir_i->i_mode & S_ISGID) {
		ri->gid = cpu_to_je16(i_gid_read(dir_i));
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
	}

	/* POSIX ACLs have to be processed now, at least partly.
	   The umask is only applied if there's no default ACL */
	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	set_nlink(inode, 1);
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_gid_write(inode, je16_to_cpu(ri->gid));
	i_uid_write(inode, je16_to_cpu(ri->uid));
	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

	inode->i_blocks = 0;
	inode->i_size = 0;

	if (insert_inode_locked(inode) < 0) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	return inode;
}

static int calculate_inocache_hashsize(uint32_t flash_size)
{
	/*
	 * Pick an inocache hash size based on the size of the medium.
	 * Count how many megabytes we're dealing with, use a hash size of
	 * twice that, rounded down to a multiple of 64, and keep it within
	 * sensible bounds.
	 */

	int size_mb = flash_size / 1024 / 1024;
	int hashsize = (size_mb * 2) & ~0x3f;

	if (hashsize < INOCACHE_HASHSIZE_MIN)
		return INOCACHE_HASHSIZE_MIN;
	if (hashsize > INOCACHE_HASHSIZE_MAX)
		return INOCACHE_HASHSIZE_MAX;

	return hashsize;
}

int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
		return -EINVAL;
	}
	if (c->mtd->type == MTD_DATAFLASH) {
		pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
			c->flash_size / 1024);
	}

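	/* JFFS2 needs a few spare erase blocks for garbage collection, so
	   refuse media too small to leave any headroom */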
	if (c->flash_size < 5*c->sector_size) {
		pr_err("Too few erase blocks (%d)\n",
		       c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}

	jffs2_init_xattr_subsystem(c);

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
	root_i = jffs2_iget(sb, 1);
	if (IS_ERR(root_i)) {
		jffs2_dbg(1, "get root inode failed\n");
		ret = PTR_ERR(root_i);
		goto out_root;
	}

	ret = -ENOMEM;

	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
	sb->s_root = d_make_root(root_i);
	if (!sb->s_root)
		goto out_root;

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	if (!(sb->s_flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);
	return 0;

out_root:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	if (jffs2_blocks_use_vmalloc(c))
		vfree(c->blocks);
	else
		kfree(c->blocks);
out_inohash:
	jffs2_clear_xattr_subsystem(c);
	kfree(c->inocache_list);
out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}

void jffs2_gc_release_inode(struct jffs2_sb_info *c,
			    struct jffs2_inode_info *f)
{
	iput(OFNI_EDONI_2SFFJ(f));
}

struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int unlinked)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (unlinked) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum);

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
					  inum);
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state);
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	if (is_bad_inode(inode)) {
		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
			  inum, unlinked);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}

unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f,
				   unsigned long offset,
				   unsigned long *priv)
{
	struct inode *inode = OFNI_EDONI_2SFFJ(f);
	struct page *pg;

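	/* Read (or find) the page covering 'offset'. The page pointer is
	   stashed in *priv so jffs2_gc_release_page() can kunmap and
	   release it once GC is done with the mapping. */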
	pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
				   (void *)jffs2_do_readpage_unlock, inode);
	if (IS_ERR(pg))
		return (void *)pg;

	*priv = (unsigned long)pg;
	return kmap(pg);
}

void jffs2_gc_release_page(struct jffs2_sb_info *c,
			   unsigned char *ptr,
			   unsigned long *priv)
{
	struct page *pg = (void *)*priv;

	kunmap(pg);
	page_cache_release(pg);
}

static int jffs2_flash_setup(struct jffs2_sb_info *c) {
	int ret = 0;

	if (jffs2_cleanmarker_oob(c)) {
		/* NAND flash... do setup accordingly */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and Dataflash */
	if (jffs2_dataflash(c)) {
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and an UBI volume */
	if (jffs2_ubivol(c)) {
		ret = jffs2_ubivol_setup(c);
		if (ret)
			return ret;
	}

	return ret;
}

void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

	if (jffs2_cleanmarker_oob(c)) {
		jffs2_nand_flash_cleanup(c);
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		jffs2_dataflash_cleanup(c);
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		jffs2_nor_wbuf_flash_cleanup(c);
	}

	/* and an UBI volume */
	if (jffs2_ubivol(c)) {
		jffs2_ubivol_cleanup(c);
	}
}