/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 */
/*
 * This file implements VFS file and inode operations for regular files, device
 * nodes and symlinks as well as address space operations.
 *
 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
 * the page is dirty and is used for optimization purposes - dirty pages are
 * not budgeted so the flag shows that 'ubifs_write_end()' should not release
 * the budget for this page. The @PG_checked flag is set if full budgeting is
 * required for the page, e.g., when it corresponds to a file hole or it is
 * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because
 * it is OK to fail in this function, and the budget is released in
 * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
 * information about how the page was budgeted, to make it possible to release
 * the budget properly.
 *
 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
 * implement. However, this is not true for 'ubifs_writepage()', which may be
 * called with @i_mutex unlocked. For example, when the flusher thread is doing
 * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
 * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
 * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
 * 'ubifs_writepage()' we are only guaranteed that the page is locked.
 *
 * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
 * ondemand_readahead -> readpage"). In case of readahead, the @I_SYNC flag is
 * not set either. However, UBIFS disables readahead.
 */
#include "ubifs.h"
#include <linux/aio.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/migrate.h>
static int read_block(struct inode *inode, void *addr, unsigned int block,
		      struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found, so it must be a hole */
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		return err;
	}

	ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
		     ubifs_inode(inode)->creat_sqnum);
	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	/* Jack: modified for LZ4K decompression */
	if (le16_to_cpu(dn->compr_type) == UBIFS_COMPR_LZ4K)
		out_len = len;
	else
		out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
			       le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * Data length can be less than a full block, even for blocks that are
	 * not the last in the file (e.g., as a result of making a hole and
	 * appending data). Ensure that the remainder is zeroed out.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err("bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	ubifs_dump_node(c, dn);
	return -EINVAL;
}
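
/*
 * Illustrative sketch (not part of UBIFS, not called anywhere): the page to
 * block mapping which 'do_readpage()' below relies on. With the standard
 * 4KiB UBIFS block size and 4KiB pages, UBIFS_BLOCKS_PER_PAGE_SHIFT is 0 and
 * page N is backed by block N; with 64KiB pages (shift 4) page N would be
 * backed by blocks N*16 .. N*16 + 15.
 */
static inline unsigned int first_block_of_page(const struct page *page)
{
	return page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
}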
static int do_readpage(struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);
	ubifs_assert(!PageChecked(page));
	ubifs_assert(!PagePrivate(page));

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode */
		SetPageChecked(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			SetPageChecked(page);
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err("cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return 0;

error:
	kfree(dn);
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return err;
}
/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

	ubifs_release_budget(c, &req);
}
/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of changing one page of data which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget };

	ubifs_release_budget(c, &req);
}
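
/*
 * A minimal sketch (not called anywhere) of how the @PG_private and
 * @PG_checked flags described at the top of this file select which of the two
 * helpers above gives a page budget back. The real logic lives in
 * 'cancel_budget()' and 'do_writepage()' below; this only illustrates it.
 */
static inline void sketch_release_page_budget(struct ubifs_info *c,
					      struct page *page)
{
	if (PagePrivate(page))
		return;	/* The page is dirty - its budget is still in use */
	if (PageChecked(page))
		/* A hole or beyond i_size - a full new-page budget was taken */
		release_new_page_budget(c);
	else
		/* The page exists on flash - the smaller budget was taken */
		release_existing_page_budget(c);
}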
static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep,
			    unsigned flags)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	struct page *page;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * At the slow path we have to budget before locking the page, because
	 * budgeting may force write-back, which would wait on locked pages and
	 * deadlock if we had the page locked. At this point we do not know
	 * anything about the page, so assume that this is a new page which is
	 * written to a hole. This corresponds to largest budget. Later the
	 * budget will be amended if this is not true.
	 */
	if (appending)
		/* We are appending data, budget for inode change */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page)) {
		ubifs_release_budget(c, &req);
		return -ENOMEM;
	}

	if (!PageUptodate(page)) {
		if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				page_cache_release(page);
				ubifs_release_budget(c, &req);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	if (PagePrivate(page))
		/*
		 * The page is dirty, which means it was budgeted twice:
		 *   o first time the budget was allocated by the task which
		 *     made the page dirty and set the PG_private flag;
		 *   o and then we budgeted for it for the second time at the
		 *     very beginning of this function.
		 *
		 * So what we have to do is to release the page budget we
		 * allocated.
		 */
		release_new_page_budget(c);
	else if (!PageChecked(page))
		/*
		 * We are changing a page which already exists on the media.
		 * This means that changing the page does not make the amount
		 * of indexing information larger, and this part of the budget
		 * which we have already acquired may be released.
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * 'ubifs_write_end()' is optimized from the fast-path part of
		 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
		 * if data is appended.
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is dirty already, so we may free the
			 * budget we allocated.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*pagep = page;
	return 0;
}
/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. The budget is allocated differently depending on whether
 * this is appending, whether the page is dirty or not, and so on. This
 * function leaves the @ui->ui_mutex locked in case of appending. Returns zero
 * in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		if (!appending)
			/*
			 * The page is dirty and we are not appending, which
			 * means no budget is needed at all.
			 */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The page is dirty and we are appending, so the inode
			 * has to be marked as dirty. However, it is already
			 * dirty, so we do not need any budget. We may return,
			 * but @ui->ui_mutex has to be left locked because we
			 * should prevent write-back from flushing the inode
			 * and freeing the budget. The lock will be released in
			 * 'ubifs_write_end()'.
			 */
			return 0;

		/*
		 * The page is dirty, we are appending, the inode is clean, so
		 * we need to budget the inode change.
		 */
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/*
			 * The page corresponds to a hole and does not exist on
			 * the media. So changing it makes the amount of
			 * indexing information larger, and we have to budget
			 * for a new page.
			 */
			req.new_page = 1;
		else
			/*
			 * Not a hole, the change will not add any new
			 * indexing information, budget for page change.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/*
				 * The inode is clean but we will have to mark
				 * it as dirty because we are appending. This
				 * needs a budget.
				 */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}
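
/*
 * Illustrative example (not called anywhere) of the request the helper above
 * builds for the most common fast-path case - changing a clean page which
 * already exists on the flash media, with no append. Only a page change is
 * budgeted, and @fast makes 'ubifs_budget_space()' fail with %-ENOSPC
 * instead of forcing write-back.
 */
static inline int example_budget_dirty_existing_page(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .fast = 1, .dirtied_page = 1 };

	return ubifs_budget_space(c, &req);
}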
/*
 * This function is called when a page of data is going to be written. Since
 * the page of data will not necessarily go to the flash straight away, UBIFS
 * has to reserve space on the media for it, which is done by means of
 * budgeting.
 *
 * This is the hot-path of the file-system and we are trying to optimize it as
 * much as possible. For this reason it is split into 2 parts - slow and fast.
 *
 * There are many budgeting cases:
 * o a new page is appended - we have to budget for a new page and for
 *   changing the inode; however, if the inode is already dirty, there is
 *   no need to budget for it;
 * o an existing clean page is changed - we have to budget for it; if the page
 *   does not exist on the media (a hole), we have to budget for a new
 *   page; otherwise, we may budget for changing an existing page; the
 *   difference between these cases is that changing an existing page does
 *   not introduce anything new to the FS indexing information, so it does
 *   not grow, and smaller budget is acquired in this case;
 * o an existing dirty page is changed - no need to budget at all, because
 *   the page budget has been acquired earlier, when the page was dirtied.
 *
 * UBIFS budgeting sub-system may force write-back if it thinks there is no
 * space to reserve. This imposes some locking restrictions and makes it
 * impossible to take into account the above cases, and makes it impossible to
 * optimize budgeting.
 *
 * The solution for this is that the fast path of 'ubifs_write_begin()' assumes
 * there is plenty of flash space and the budget will be acquired quickly,
 * without forcing write-back. The slow path does not make this assumption.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	int skipped_read = 0;
	struct page *page;

	ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return -EROFS;

	/* Try out the fast-path part first */
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* The page is not loaded from the flash */
		if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
			/*
			 * We change whole page so no need to load it. But we
			 * do not know whether this page exists on the media or
			 * not, so we assume the latter because it requires
			 * larger budget. The assumption is that it is better
			 * to budget a bit more than to read the page from the
			 * media. Thus, we are setting the @PG_checked flag
			 * here.
			 */
			SetPageChecked(page);
			skipped_read = 1;
		} else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				page_cache_release(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	err = allocate_budget(c, page, ui, appending);
	if (unlikely(err)) {
		ubifs_assert(err == -ENOSPC);
		/*
		 * If we skipped reading the page because we were going to
		 * write all of it, then it is not up to date.
		 */
		if (skipped_read) {
			ClearPageChecked(page);
			ClearPageUptodate(page);
		}
		/*
		 * Budgeting failed which means it would have to force
		 * write-back but didn't, because we set the @fast flag in the
		 * request. Write-back cannot be done now, while we have the
		 * page locked, because it would deadlock. Unlock and free
		 * everything and fall-back to slow-path.
		 */
		if (appending) {
			ubifs_assert(mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		unlock_page(page);
		page_cache_release(page);

		return write_begin_slow(mapping, pos, len, pagep, flags);
	}

	/*
	 * Whee, we acquired budgeting quickly - without involving
	 * garbage-collection, committing or forcing write-back. We return
	 * with @ui->ui_mutex locked if we are appending pages, and unlocked
	 * otherwise. This is an optimization (slightly hacky though).
	 */
	*pagep = page;
	return 0;
}
/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @page: page to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for a page write operation. It unlocks the
 * @ui->ui_mutex in case of appending.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		if (!ui->dirty)
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!PagePrivate(page)) {
		if (PageChecked(page))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}
static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, page->index, len, copied, inode->i_size);

	if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
		/*
		 * VFS copied less data to the page than it intended and
		 * declared in its '->write_begin()' call via the @len
		 * argument. If the page was not up-to-date, and @len was
		 * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did
		 * not load it from the media (for optimization reasons). This
		 * means that part of the page contains garbage. So read the
		 * page now.
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, page, ui, appending);
		ClearPageChecked(page);

		/*
		 * Return 0 to force VFS to repeat the whole operation, or the
		 * error code if 'do_readpage()' fails.
		 */
		copied = do_readpage(page);
		goto out;
	}

	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * Note, we do not set @I_DIRTY_PAGES (which means that the
		 * inode has dirty pages), this has been done in
		 * '__set_page_dirty_nobuffers()'.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	unlock_page(page);
	page_cache_release(page);
	return copied;
}
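
/*
 * Sketch (not called anywhere) of how the VFS drives the two hooks above for
 * a buffered write. This is a simplified, kernel-buffer variant of what
 * 'generic_perform_write()' does; error handling, iov_iter details and the
 * atomic user copy are omitted. Assumes @offs + @len fits within one page.
 */
static inline int sketch_write_one_page(struct file *file,
					struct address_space *mapping,
					loff_t pos, unsigned len,
					const void *buf)
{
	struct page *page;
	void *fsdata;
	unsigned offs = pos & (PAGE_CACHE_SIZE - 1);
	int err;

	/* Budget and lock the page */
	err = ubifs_write_begin(file, mapping, pos, len, 0, &page, &fsdata);
	if (err)
		return err;

	/* Copy the new data into the locked, budgeted page */
	memcpy(kmap(page) + offs, buf, len);
	kunmap(page);

	/* Mark the page dirty and release the lock (and budget bookkeeping) */
	return ubifs_write_end(file, mapping, pos, len, len, page, fsdata);
}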
/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (!i_size || page->index > end_index) {
		hole = 1;
		memset(addr, 0, PAGE_CACHE_SIZE);
		goto out_hole;
	}

	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			/* Jack: modified for LZ4K decompression */
			if (le16_to_cpu(dn->compr_type) == UBIFS_COMPR_LZ4K)
				out_len = len;
			else
				out_len = UBIFS_BLOCK_SIZE;
			err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			nn += 1;
			continue;
		} else {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		int len = i_size & (PAGE_CACHE_SIZE - 1);

		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err("bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}
/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read
 *
 * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * This happens when there are multiple blocks per page and the
		 * blocks for the first page we are looking for, are not
		 * together. If all the pages were like this, bulk-read would
		 * reduce performance, so we turn it off for a while.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate bulk-read buffer depending on how many data
			 * nodes we are going to read.
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(bu->buf_len > 0);
			ubifs_assert(bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		page = find_or_create_page(mapping, page_offset,
					   GFP_NOFS | __GFP_COLD);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		page_cache_release(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn("ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}
/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. UBIFS
 * bulk-read facility is designed to take advantage of that, by reading in one
 * go consecutive data nodes that are also located consecutively in the same
 * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
	 * so don't bother if we cannot lock the mutex.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* Turn off bulk-read if we stop reading sequentially */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three reads in a row, so switch on bulk-read */
		ui->bulk_read = 1;
	}

	/*
	 * If possible, try to use pre-allocated bulk-read information, which
	 * is protected by @c->bu_mutex.
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, page);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}
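
/*
 * Illustrative sketch (not called anywhere) of the sequential-read detector
 * above: a page read counts as sequential only if it immediately follows the
 * last page read, and bulk-read switches on once three such reads happen in
 * a row; any non-sequential read turns it back off.
 */
static inline int sketch_bulk_read_wanted(const struct ubifs_inode *ui,
					  pgoff_t index)
{
	if (index != ui->last_page_read + 1)
		return 0;		/* Random access - stay off */
	return ui->bulk_read || ui->read_in_a_row + 1 >= 3;
}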
static int ubifs_readpage(struct file *file, struct page *page)
{
	if (ubifs_bulk_read(page))
		return 0;
	do_readpage(page);
	unlock_page(page);
	return 0;
}
static int do_writepage(struct page *page, int len)
{
	int err = 0, i, blen;
	unsigned int block;
	void *addr;
	union ubifs_key key;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
	struct ubifs_inode *ui = ubifs_inode(inode);
	spin_lock(&ui->ui_lock);
	ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT);
	spin_unlock(&ui->ui_lock);
#endif

	/* Update radix tree tags */
	set_page_writeback(page);

	addr = kmap(page);
	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	i = 0;
	while (len) {
		blen = min_t(int, len, UBIFS_BLOCK_SIZE);
		data_key_init(c, &key, inode->i_ino, block);
		err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
		if (err)
			break;
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += blen;
		len -= blen;
	}
	if (err) {
		SetPageError(page);
		ubifs_err("cannot write page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		ubifs_ro_mode(c, err);
	}

	ubifs_assert(PagePrivate(page));
	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	ClearPagePrivate(page);
	ClearPageChecked(page);

	kunmap(page);
	unlock_page(page);
	end_page_writeback(page);
	return err;
}
/*
 * When writing-back dirty inodes, VFS first writes-back pages belonging to the
 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
 * situation when we have an inode with size 0, then a megabyte of data is
 * appended to the inode, then write-back starts and flushes some amount of the
 * dirty pages, the journal becomes full, commit happens and finishes, and then
 * an unclean reboot happens. When the file system is mounted next time, the
 * inode size would still be 0, but there would be many pages which are beyond
 * the inode size, they would be indexed and consume flash space. Because the
 * journal has been committed, the replay would not be able to detect this
 * situation and correct the inode size. This means UBIFS would have to scan
 * the whole index and correct all inode sizes, which is long and unacceptable.
 *
 * To prevent situations like this, UBIFS writes pages back only if they are
 * within the last synchronized inode size, i.e. the size which has been
 * written to the flash media last time. Otherwise, UBIFS forces inode
 * write-back, thus making sure the on-flash inode contains current inode size,
 * and then keeps writing pages back.
 *
 * Some locking issues explanation. 'ubifs_writepage()' is first called with
 * the page locked, and it locks @ui_mutex. However, write-back does not take
 * inode @i_mutex, which means other VFS operations may be run on this inode at
 * the same time. And the problematic one is truncation to smaller size, from
 * where we have to call 'truncate_setsize()', which first changes
 * @inode->i_size, then drops the truncated pages. And while dropping the
 * pages, it takes the page lock. This means that 'do_truncation()' cannot call
 * 'truncate_setsize()' with @ui_mutex locked, because it would deadlock with
 * 'ubifs_writepage()'. This means that @inode->i_size is changed while
 * @ui_mutex is unlocked.
 *
 * XXX(truncate): with the new truncate sequence this is not true anymore,
 * and the calls to truncate_setsize can be moved around freely. They should
 * be moved to the very end of the truncate sequence.
 *
 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
 * inode size. How do we do this if @inode->i_size may become smaller while we
 * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the
 * @ui->ui_size "shadow" field which UBIFS uses instead of @inode->i_size
 * internally and updates it under @ui_mutex.
 *
 * Q: why do we not worry that if we race with truncation, we may end up with a
 * situation when the inode is truncated while we are in the middle of
 * 'do_writepage()', so we do write beyond inode size?
 * A: If we are in the middle of 'do_writepage()', truncation would be locked
 * on the page lock and it would not write the truncated inode node to the
 * journal before we have finished.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	int err, len = i_size & (PAGE_CACHE_SIZE - 1);
	void *kaddr;

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, page->index, page->flags);
	ubifs_assert(PagePrivate(page));

	/* Is the page fully outside @i_size? (truncate in progress) */
	if (page->index > end_index || (page->index == end_index && !len)) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the page fully inside @i_size? */
	if (page->index < end_index) {
		if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
			err = inode->i_sb->s_op->write_inode(inode, NULL);
			if (err)
				goto out_unlock;
			/*
			 * The inode has been written, but the write-buffer has
			 * not been synchronized, so in case of an unclean
			 * reboot we may end up with some pages beyond inode
			 * size, but they would be in the journal (because
			 * commit flushes write buffers) and recovery would deal
			 * with this.
			 */
		}
		return do_writepage(page, PAGE_CACHE_SIZE);
	}

	/*
	 * The page straddles @i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page);
	memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (i_size > synced_i_size) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out_unlock;
	}

	return do_writepage(page, len);

out_unlock:
	unlock_page(page);
	return err;
}
/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
	if (attr->ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (attr->ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (attr->ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		inode->i_mode = mode;
	}
}
/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size. Returns zero in case of success and a negative error code
 * in case of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If this is truncation to a smaller size, and we do not truncate on a
	 * block boundary, budget for changing one data block, because the last
	 * block will be re-written.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;

	req.dirtied_ino = 1;
	/* A funny way to budget for truncation node */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Treat truncations to zero as deletion and always allow them,
		 * just like we do for '->unlink()'.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	truncate_setsize(inode, new_size);

	if (offset) {
		pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * 'ubifs_jnl_truncate()' will try to truncate
				 * the last data node, but it contains
				 * out-of-date data because the page is dirty.
				 * Write the page now, so that
				 * 'ubifs_jnl_truncate()' will see an already
				 * truncated (and up to date) data node.
				 */
				ubifs_assert(PagePrivate(page));

				clear_page_dirty_for_io(page);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					offset = new_size &
						 (PAGE_CACHE_SIZE - 1);
				err = do_writepage(page, offset);
				page_cache_release(page);
				if (err)
					goto out_budg;
				/*
				 * We could now tell 'ubifs_jnl_truncate()' not
				 * to read the last block.
				 */
			} else {
				/*
				 * We could 'kmap()' the page and pass the data
				 * to 'ubifs_jnl_truncate()' to save it from
				 * having to read it.
				 */
				unlock_page(page);
				page_cache_release(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else
		c->bi.nospace = c->bi.nospace_rp = 0;
	return err;
}
/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size. Returns zero in case of success and a negative
 * error code in case of failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		truncate_setsize(inode, new_size);
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		/* 'truncate_setsize()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * Inode length changed, so we have to make sure
		 * @I_DIRTY_DATASYNC is set.
		 */
		__mark_inode_dirty(inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, NULL);
	return err;
}
int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
{
	int err;
	struct inode *inode = dentry->d_inode;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
		inode->i_ino, inode->i_mode, attr->ia_valid);
	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	err = dbg_check_synced_i_size(c, inode);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
		/* Truncation to a smaller size */
		err = do_truncation(c, inode, attr);
	else
		err = do_setattr(c, inode, attr);

	return err;
}
static void ubifs_invalidatepage(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(PagePrivate(page));
	if (offset)
		/* Partial page remains dirty */
		return;

	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	ClearPagePrivate(page);
	ClearPageChecked(page);
}
static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ubifs_inode *ui = ubifs_inode(dentry->d_inode);

	nd_set_link(nd, ui->data);
	return NULL;
}
int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	if (c->ro_mount)
		/*
		 * For some really strange reasons VFS does not filter out
		 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
		 */
		return 0;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;
	mutex_lock(&inode->i_mutex);

	/* Synchronize the inode unless this is a 'datasync()' call. */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer. Flush
	 * them.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}
/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 */
static inline int mctime_update_needed(const struct inode *inode,
				       const struct timespec *now)
{
	if (!timespec_equal(&inode->i_mtime, now) ||
	    !timespec_equal(&inode->i_ctime, now))
		return 1;
	return 0;
}
/**
 * update_mctime - update mtime and ctime of an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to update
 *
 * This function updates mtime and ctime of the inode if they are not
 * equivalent to the current time. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int update_mctime(struct ubifs_info *c, struct inode *inode)
{
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}
static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	int err;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	err = update_mctime(c, inode);
	if (err)
		return err;

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
static int ubifs_set_page_dirty(struct page *page)
{
	int ret;

	ret = __set_page_dirty_nobuffers(page);
	/*
	 * An attempt to dirty a page without budgeting for it - should not
	 * happen.
	 */
	ubifs_assert(ret == 0);
	return ret;
}
#ifdef CONFIG_MIGRATION
static int ubifs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc;

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		SetPagePrivate(newpage);
	}

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif
static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
	/*
	 * An attempt to release a dirty page without budgeting for it - should
	 * not happen.
	 */
	if (PageWriteback(page))
		return 0;
	ubifs_assert(PagePrivate(page));
	ubifs_assert(0);
	ClearPagePrivate(page);
	ClearPageChecked(page);
	return 1;
}
/*
 * mmap()d file has taken write protection fault and is being made writable.
 * UBIFS must ensure page is budgeted for.
 */
static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
				 struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
		i_size_read(inode));
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return VM_FAULT_SIGBUS; /* -EROFS */

	/*
	 * We have not locked @page so far so we may budget for changing the
	 * page. Note, we cannot do this after we locked the page, because
	 * budgeting may cause write-back which would cause deadlock.
	 *
	 * At the moment we do not know whether the page is dirty or not, so we
	 * assume that it is not and budget for a new page. We could look at
	 * the @PG_private flag and figure this out, but we may race with write
	 * back and the page state may change by the time we lock it, so this
	 * would need additional care. We do not bother with this at the
	 * moment, although it might be good idea to do. Instead, we allocate
	 * budget for a new page and amend it later on if the page was in fact
	 * dirty.
	 *
	 * The budgeting-related logic of this function is similar to what we
	 * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
	 * for more comments.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * We have to change inode time stamp which requires extra
		 * budgeting.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn("out of space for mmapped file (inode number %lu)",
				   inode->i_ino);
		return VM_FAULT_SIGBUS;
	}

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* Page got truncated out from underneath us */
		err = -EINVAL;
		goto out_unlock;
	}

	if (PagePrivate(page))
		release_new_page_budget(c);
	else {
		if (!PageChecked(page))
			ubifs_convert_page_budget(c);
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_dirty_inode_budget(c, ui);
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;

out_unlock:
	unlock_page(page);
	ubifs_release_budget(c, &req);
	if (err)
		err = VM_FAULT_SIGBUS;
	return err;
}
static const struct vm_operations_struct ubifs_file_vm_ops = {
	.fault        = filemap_fault,
	.page_mkwrite = ubifs_vm_page_mkwrite,
	.remap_pages  = generic_file_remap_pages,
};
static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err;

	err = generic_file_mmap(file, vma);
	if (err)
		return err;
	vma->vm_ops = &ubifs_file_vm_ops;
	return 0;
}
long ubifs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct iattr newattrs;
	int err;
	loff_t new_len = offset + len;

	if (len < 0 || offset < 0)
		return -EINVAL;

	/* The requested range is already within i_size - nothing to do */
	if (new_len < inode->i_size)
		return 0;

	newattrs.ia_size = new_len;
	newattrs.ia_valid = ATTR_SIZE | ATTR_MTIME | ATTR_CTIME;
	/* Supply the timestamps declared valid above */
	newattrs.ia_mtime = newattrs.ia_ctime = ubifs_current_time(inode);
	newattrs.ia_file = file;
	newattrs.ia_valid |= ATTR_FILE;

	err = do_setattr(c, inode, &newattrs);
	return err;
}
const struct address_space_operations ubifs_file_address_operations = {
	.readpage       = ubifs_readpage,
	.writepage      = ubifs_writepage,
	.write_begin    = ubifs_write_begin,
	.write_end      = ubifs_write_end,
	.invalidatepage = ubifs_invalidatepage,
	.set_page_dirty = ubifs_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage    = ubifs_migrate_page,
#endif
	.releasepage    = ubifs_releasepage,
};
const struct inode_operations ubifs_file_inode_operations = {
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_getattr,
	.setxattr    = ubifs_setxattr,
	.getxattr    = ubifs_getxattr,
	.listxattr   = ubifs_listxattr,
	.removexattr = ubifs_removexattr,
};
const struct inode_operations ubifs_symlink_inode_operations = {
	.readlink    = generic_readlink,
	.follow_link = ubifs_follow_link,
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_getattr,
	.setxattr    = ubifs_setxattr,
	.getxattr    = ubifs_getxattr,
	.listxattr   = ubifs_listxattr,
	.removexattr = ubifs_removexattr,
};
const struct file_operations ubifs_file_operations = {
	.llseek         = generic_file_llseek,
	.read           = do_sync_read,
	.write          = do_sync_write,
	.aio_read       = generic_file_aio_read,
	.aio_write      = ubifs_aio_write,
	.mmap           = ubifs_file_mmap,
	.fsync          = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read    = generic_file_splice_read,
	.splice_write   = generic_file_splice_write,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ubifs_compat_ioctl,
#endif
	.fallocate      = ubifs_fallocate,
};