/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static int f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	err = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return err;
}
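/*
 * ->page_mkwrite() handler, summarized from the flow below: reserve a
 * block for the faulting page, then dirty the page under i_mmap_sem so
 * it cannot race with truncate; the tail beyond EOF is zeroed first.
 */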
static int f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed page writeback via META_MAPPING */
	if (f2fs_post_read_required(inode))
		f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);

out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
err:
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		need_dentry_mark(sbi, inode->i_ino) &&
		exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
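/*
 * Core of fsync()/fdatasync(): write back dirty data in [start, end],
 * then either trigger a checkpoint (when need_do_checkpoint() reports
 * a reason) or flush the inode's dirty node pages and issue a device
 * flush so roll-forward recovery can restore the inode after a crash.
 */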
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;

	return f2fs_do_sync_file(file, start, end, datasync, false);
}
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}
static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			is_valid_data_blkaddr(sbi, blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
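/*
 * Back end of llseek(SEEK_DATA/SEEK_HOLE): walk the dnode blocks from
 * @offset and report the first position that __found_offset() accepts.
 */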
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exists */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						blkaddr, DATA_GENERIC)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
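/*
 * Invalidate @count block addresses starting at dn->ofs_in_node and
 * drop them from the extent cache and the valid-block count.
 */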
void truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
			continue;

		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
}
void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
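/*
 * Truncate all data blocks at or beyond @from: direct pointers in the
 * dnode containing @from are freed here, deeper indirect blocks via
 * truncate_inode_blocks(), and a partial last page is zeroed by
 * truncate_partial_data_page().
 */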
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(inode->i_sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	if (flags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
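/*
 * ->setattr(): size changes are applied under i_mmap_sem to serialize
 * against page faults; ownership changes go through dquot_transfer().
 */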
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size <= i_size_read(inode)) {
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			if (err)
				return err;
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			up_write(&F2FS_I(inode)->i_mmap_sem);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = get_next_page_offset(&dn, pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
		}
	}

	return ret;
}
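/*
 * The three helpers below implement block exchange: __read_out_blkaddrs()
 * detaches the source block addresses, __clone_blkaddrs() attaches them
 * (or copies the data) at the destination, and __roll_back_blkaddrs()
 * restores the source on failure.
 */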
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					sizeof(block_t) * olen, GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					sizeof(int) * olen, GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}
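/*
 * FALLOC_FL_COLLAPSE_RANGE: shift the blocks after the range down over
 * it via __exchange_data_block(), then shrink i_size by @len.
 */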
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out_unlock;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		goto out_unlock;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
out_unlock:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
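/*
 * FALLOC_FL_ZERO_RANGE: zero the partial head/tail pages and convert
 * every whole block in the range to NEW_ADDR (preallocated, reads as
 * zero) via f2fs_do_zero_range().
 */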
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		goto out_sem;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			goto out_sem;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				goto out_sem;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}
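/*
 * FALLOC_FL_INSERT_RANGE: shift blocks from the end of file upwards by
 * @len, chunk by chunk, to open a block-aligned gap at @offset.
 */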
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		goto out;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	return ret;
}
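/*
 * Plain fallocate(): preallocate blocks with F2FS_GET_BLOCK_PRE_AIO;
 * on partial failure, i_size is advanced only to the failed position.
 */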
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only support ->fallocate for regular file */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close call. So we should
	 * not drop any inmemory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
	}
	return 0;
}
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should roll back.
	 * Otherwise, other readers/writers can see a corrupted database
	 * until all the writers close the file. Since this should be done
	 * before dropping the file lock, it needs to be done in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		drop_inmem_pages(inode);
	return 0;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags &
				(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	return put_user(flags, (int __user *)arg);
}
static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & (FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	flags |= oldflags & ~(FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	fi->i_flags = flags;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
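/*
 * Atomic/volatile write ioctls: start_atomic_write redirects writes to
 * in-memory pages; commit_atomic_write flushes them and fsyncs with a
 * node chain so the whole update is recovered all-or-nothing.
 */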
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ATOMIC_FILE);
	set_inode_flag(inode, FI_HOT_DATA);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!get_dirty_pages(inode))
		goto inc_stat;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		clear_inode_flag(inode, FI_HOT_DATA);
		goto out;
	}

inc_stat:
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		ret = commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			clear_inode_flag(inode, FI_HOT_DATA);
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	stop_gc_thread(sbi);
	stop_discard_thread(sbi);

	drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
		ret = -EINVAL;
		goto out;
	}
do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
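/*
 * Defragmentation: if the range is physically discontiguous and enough
 * free sections exist, redirty the pages a segment-sized chunk at a
 * time (under FI_DO_DEFRAG) and write them back to relocate the blocks.
 */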
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free section for LFS allocation, this can
	 * avoid defragment running in SSR mode when free section are allocated
	 * intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	down_write(&F2FS_I(src)->dio_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
		if (!down_write_trylock(&F2FS_I(dst)->dio_rwsem[WRITE])) {
			inode_unlock(dst);
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst) {
		up_write(&F2FS_I(dst)->dio_rwsem[WRITE]);
		inode_unlock(dst);
	}
out:
	up_write(&F2FS_I(src)->dio_rwsem[WRITE]);
	inode_unlock(src);
	return ret;
}
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
*filp
, unsigned long arg
)
2531 struct inode
*inode
= file_inode(filp
);
2532 u32 sb_feature
= le32_to_cpu(F2FS_I_SB(inode
)->raw_super
->feature
);
2534 /* Must validate to set it with SQLite behavior in Android. */
2535 sb_feature
|= F2FS_FEATURE_ATOMIC_WRITE
;
2537 return put_user(sb_feature
, (u32 __user
*)arg
);
#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		goto out_unlock;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)
/* Flags we can manipulate with through FS_IOC_FSSETXATTR */
#define F2FS_FL_XFLAG_VISIBLE		(FS_SYNC_FL | \
					 FS_IMMUTABLE_FL | \
					 FS_APPEND_FL | \
					 FS_NODUMP_FL | \
					 FS_NOATIME_FL | \
					 FS_PROJINHERIT_FL)
/* Transfer xflags flags to internal */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= FS_PROJINHERIT_FL;

	return iflags;
}
*filp
, unsigned long arg
)
2677 struct inode
*inode
= file_inode(filp
);
2678 struct f2fs_inode_info
*fi
= F2FS_I(inode
);
2681 memset(&fa
, 0, sizeof(struct fsxattr
));
2682 fa
.fsx_xflags
= f2fs_iflags_to_xflags(fi
->i_flags
&
2683 (FS_FL_USER_VISIBLE
| FS_PROJINHERIT_FL
));
2685 if (f2fs_sb_has_project_quota(inode
->i_sb
))
2686 fa
.fsx_projid
= (__u32
)from_kprojid(&init_user_ns
,
2689 if (copy_to_user((struct fsxattr __user
*)arg
, &fa
, sizeof(fa
)))
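
/*
 * Illustrative usage sketch (userspace, not part of this file), assuming
 * F2FS_IOC_FSGETXATTR carries the same command number as the generic
 * FS_IOC_FSGETXATTR so stock tooling works unchanged:
 *
 *	struct fsxattr fa;
 *
 *	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) == 0)
 *		printf("xflags=%#x projid=%u\n", fa.fsx_xflags, fa.fsx_projid);
 */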
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure the caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
	if (err)
		return err;

	return 0;
}
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures of a normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1);

	if (fi->i_gc_failures > sbi->gc_pin_file_threshold) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: Enable GC = ino %lx after %x GC trials\n",
			__func__, inode->i_ino, fi->i_gc_failures);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		F2FS_I(inode)->i_gc_failures = 1;
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures;
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures;
	return put_user(pin, (u32 __user *)arg);
}
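
/*
 * Illustrative usage sketch (userspace, not part of this file): pinning a
 * file and reading back its GC-trial count via the two ioctls above:
 *
 *	__u32 pin = 1, trials;
 *
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);		// pin: GC skips file
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &trials);	// GC trials while pinned
 *
 * Error handling is omitted for brevity.
 */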
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->dio_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->dio_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}
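
/*
 * Illustrative usage sketch (userspace, not part of this file): an
 * application can warm the extent cache for a large file before issuing
 * random reads:
 *
 *	ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS, 0);
 *
 * The ioctl takes no argument payload; the file's mappings are walked and
 * cached as a side effect of f2fs_map_blocks() above.
 */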
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	default:
		return -ENOTTY;
	}
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct blk_plug plug;
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT) &&
			(iocb->ki_flags & IOCB_DIRECT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, WRITE)) {
				inode_unlock(inode);
				return -EAGAIN;
			}
		} else {
			preallocated = true;
			target_size = iocb->ki_pos + iov_iter_count(from);

			err = f2fs_preallocate_blocks(iocb, from);
			if (err) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return err;
			}
		}
		blk_start_plug(&plug);
		ret = __generic_file_write_iter(iocb, from);
		blk_finish_plug(&plug);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
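
/*
 * Design note on the IOCB_NOWAIT handling above: f2fs only honors
 * non-blocking writes for direct I/O that strictly overwrites already
 * allocated blocks.  Anything that could block for a long time - waiting
 * on the inode lock, allocating blocks, converting inline data, or falling
 * back to buffered I/O - fails with -EAGAIN so the caller (e.g. a writer
 * using RWF_NOWAIT) can retry from a context that may sleep.
 */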
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};