2 * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 /************************************************************************/
20 /* PROJECT : exFAT & FAT12/16/32 File System */
22 /* PURPOSE : sdFAT glue layer for supporting VFS */
24 /*----------------------------------------------------------------------*/
28 /************************************************************************/
30 #include <linux/version.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/time.h>
34 #include <linux/slab.h>
35 #include <linux/seq_file.h>
36 #include <linux/pagemap.h>
37 #include <linux/mpage.h>
38 #include <linux/buffer_head.h>
39 #include <linux/exportfs.h>
40 #include <linux/mount.h>
41 #include <linux/vfs.h>
42 #include <linux/parser.h>
43 #include <linux/uio.h>
44 #include <linux/writeback.h>
45 #include <linux/log2.h>
46 #include <linux/hash.h>
47 #include <linux/backing-dev.h>
48 #include <linux/sched.h>
49 #include <linux/fs_struct.h>
50 #include <linux/namei.h>
51 #include <linux/bio.h>
52 #include <linux/blkdev.h>
53 #include <linux/swap.h> /* for mark_page_accessed() */
54 #include <linux/vmalloc.h>
55 #include <asm/current.h>
56 #include <asm/unaligned.h>
57 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
58 #include <linux/iversion.h>
59 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
60 #include <linux/aio.h>
63 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
64 #error SDFAT only supports linux kernel version 3.0 or higher
70 /* skip iterating emit_dots when dir is empty */
71 #define ITER_POS_FILLED_DOTS (2)
73 /* type index declare at sdfat.h */
74 const char *FS_TYPE_STR
[] = {
80 static struct kset
*sdfat_kset
;
81 static struct kmem_cache
*sdfat_inode_cachep
;
84 static int sdfat_default_codepage
= CONFIG_SDFAT_DEFAULT_CODEPAGE
;
85 static char sdfat_default_iocharset
[] = CONFIG_SDFAT_DEFAULT_IOCHARSET
;
86 static const char sdfat_iocharset_with_utf8
[] = "iso8859-1";
#ifdef CONFIG_SDFAT_TRACE_SB_LOCK
/* timestamp of the last __lock_super(), used to report long hold times */
static unsigned long __lock_jiffies;
#endif
92 static void sdfat_truncate(struct inode
*inode
, loff_t old_size
);
93 static int sdfat_get_block(struct inode
*inode
, sector_t iblock
,
94 struct buffer_head
*bh_result
, int create
);
96 static struct inode
*sdfat_iget(struct super_block
*sb
, loff_t i_pos
);
97 static struct inode
*sdfat_build_inode(struct super_block
*sb
, const FILE_ID_T
*fid
, loff_t i_pos
);
98 static void sdfat_detach(struct inode
*inode
);
99 static void sdfat_attach(struct inode
*inode
, loff_t i_pos
);
100 static inline unsigned long sdfat_hash(loff_t i_pos
);
101 static int __sdfat_write_inode(struct inode
*inode
, int sync
);
102 static int sdfat_sync_inode(struct inode
*inode
);
103 static int sdfat_write_inode(struct inode
*inode
, struct writeback_control
*wbc
);
104 static void sdfat_write_super(struct super_block
*sb
);
105 static void sdfat_write_failed(struct address_space
*mapping
, loff_t to
);
107 static void sdfat_init_namebuf(DENTRY_NAMEBUF_T
*nb
);
108 static int sdfat_alloc_namebuf(DENTRY_NAMEBUF_T
*nb
);
109 static void sdfat_free_namebuf(DENTRY_NAMEBUF_T
*nb
);
111 /*************************************************************************
112 * INNER FUNCTIONS FOR FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
113 *************************************************************************/
114 static int __sdfat_getattr(struct inode
*inode
, struct kstat
*stat
);
115 static void __sdfat_writepage_end_io(struct bio
*bio
, int err
);
116 static inline void __lock_super(struct super_block
*sb
);
117 static inline void __unlock_super(struct super_block
*sb
);
118 static int __sdfat_create(struct inode
*dir
, struct dentry
*dentry
);
119 static int __sdfat_revalidate(struct dentry
*dentry
);
120 static int __sdfat_revalidate_ci(struct dentry
*dentry
, unsigned int flags
);
121 static int __sdfat_file_fsync(struct file
*filp
, loff_t start
, loff_t end
, int datasync
);
122 static struct dentry
*__sdfat_lookup(struct inode
*dir
, struct dentry
*dentry
);
123 static int __sdfat_mkdir(struct inode
*dir
, struct dentry
*dentry
);
124 static int __sdfat_rename(struct inode
*old_dir
, struct dentry
*old_dentry
,
125 struct inode
*new_dir
, struct dentry
*new_dentry
);
126 static int __sdfat_show_options(struct seq_file
*m
, struct super_block
*sb
);
127 static inline ssize_t
__sdfat_blkdev_direct_IO(int rw
, struct kiocb
*iocb
,
128 struct inode
*inode
, void *iov_u
, loff_t offset
,
129 unsigned long nr_segs
);
130 static inline ssize_t
__sdfat_direct_IO(int rw
, struct kiocb
*iocb
,
131 struct inode
*inode
, void *iov_u
, loff_t offset
,
132 loff_t count
, unsigned long nr_segs
);
133 static int __sdfat_d_hash(const struct dentry
*dentry
, struct qstr
*qstr
);
134 static int __sdfat_d_hashi(const struct dentry
*dentry
, struct qstr
*qstr
);
135 static int __sdfat_cmp(const struct dentry
*dentry
, unsigned int len
,
136 const char *str
, const struct qstr
*name
);
137 static int __sdfat_cmpi(const struct dentry
*dentry
, unsigned int len
,
138 const char *str
, const struct qstr
*name
);
140 /*************************************************************************
141 * FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
142 *************************************************************************/
143 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
144 static inline void inode_set_iversion(struct inode
*inode
, u64 val
)
146 inode
->i_version
= val
;
148 static inline u64
inode_peek_iversion(struct inode
*inode
)
150 return inode
->i_version
;
155 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
157 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) */
158 static inline void bio_set_dev(struct bio
*bio
, struct block_device
*bdev
)
165 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
166 static int sdfat_getattr(const struct path
*path
, struct kstat
*stat
,
167 u32 request_mask
, unsigned int query_flags
)
169 struct inode
*inode
= d_backing_inode(path
->dentry
);
171 return __sdfat_getattr(inode
, stat
);
173 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
174 static int sdfat_getattr(struct vfsmount
*mnt
, struct dentry
*dentry
, struct kstat
*stat
)
176 struct inode
*inode
= dentry
->d_inode
;
178 return __sdfat_getattr(inode
, stat
);
183 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
184 static inline void __sdfat_clean_bdev_aliases(struct block_device
*bdev
, sector_t block
)
186 clean_bdev_aliases(bdev
, block
, 1);
188 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) */
189 static inline void __sdfat_clean_bdev_aliases(struct block_device
*bdev
, sector_t block
)
191 unmap_underlying_metadata(bdev
, block
);
194 static inline int wbc_to_write_flags(struct writeback_control
*wbc
)
196 if (wbc
->sync_mode
== WB_SYNC_ALL
)
204 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
205 static int sdfat_rename(struct inode
*old_dir
, struct dentry
*old_dentry
,
206 struct inode
*new_dir
, struct dentry
*new_dentry
,
210 * The VFS already checks for existence, so for local filesystems
211 * the RENAME_NOREPLACE implementation is equivalent to plain rename.
212 * Don't support any other flags
214 if (flags
& ~RENAME_NOREPLACE
)
216 return __sdfat_rename(old_dir
, old_dentry
, new_dir
, new_dentry
);
218 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) */
219 static int sdfat_rename(struct inode
*old_dir
, struct dentry
*old_dentry
,
220 struct inode
*new_dir
, struct dentry
*new_dentry
)
222 return __sdfat_rename(old_dir
, old_dentry
, new_dir
, new_dentry
);
225 static int setattr_prepare(struct dentry
*dentry
, struct iattr
*attr
)
227 struct inode
*inode
= dentry
->d_inode
;
229 return inode_change_ok(inode
, attr
);
234 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
235 static inline void __sdfat_submit_bio_write(struct bio
*bio
,
236 struct writeback_control
*wbc
)
238 int write_flags
= wbc_to_write_flags(wbc
);
240 bio_set_op_attrs(bio
, REQ_OP_WRITE
, write_flags
);
244 static inline unsigned int __sdfat_full_name_hash(const struct dentry
*dentry
, const char *name
, unsigned int len
)
246 return full_name_hash(dentry
, name
, len
);
249 static inline unsigned long __sdfat_init_name_hash(const struct dentry
*dentry
)
251 return init_name_hash(dentry
);
253 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) */
254 static inline void __sdfat_submit_bio_write(struct bio
*bio
,
255 struct writeback_control
*wbc
)
257 int write_flags
= wbc_to_write_flags(wbc
);
259 submit_bio(write_flags
, bio
);
262 static inline unsigned int __sdfat_full_name_hash(const struct dentry
*unused
, const char *name
, unsigned int len
)
264 return full_name_hash(name
, len
);
267 static inline unsigned long __sdfat_init_name_hash(const struct dentry
*unused
)
269 return init_name_hash();
274 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 21)
276 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 21) */
277 static inline void inode_lock(struct inode
*inode
)
279 mutex_lock(&inode
->i_mutex
);
282 static inline void inode_unlock(struct inode
*inode
)
284 mutex_unlock(&inode
->i_mutex
);
289 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
290 static inline int sdfat_remount_syncfs(struct super_block
*sb
)
295 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) */
296 static inline int sdfat_remount_syncfs(struct super_block
*sb
)
299 * We don`t need to call sync_filesystem(sb),
300 * Because VFS calls it.
306 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
308 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) */
309 static inline void truncate_inode_pages_final(struct address_space
*mapping
)
311 truncate_inode_pages(mapping
, 0);
316 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
317 static inline sector_t
__sdfat_bio_sector(struct bio
*bio
)
319 return bio
->bi_iter
.bi_sector
;
322 static inline void __sdfat_set_bio_iterate(struct bio
*bio
, sector_t sector
,
323 unsigned int size
, unsigned int idx
, unsigned int done
)
325 struct bvec_iter
*iter
= &(bio
->bi_iter
);
327 iter
->bi_sector
= sector
;
328 iter
->bi_size
= size
;
330 iter
->bi_bvec_done
= done
;
333 static void __sdfat_truncate_pagecache(struct inode
*inode
,
334 loff_t to
, loff_t newsize
)
336 truncate_pagecache(inode
, newsize
);
/* 3.14+ d_hash signature: no inode argument. */
static int sdfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
{
	return __sdfat_d_hash(dentry, qstr);
}

static int sdfat_d_hashi(const struct dentry *dentry, struct qstr *qstr)
{
	return __sdfat_d_hashi(dentry, qstr);
}
349 //instead of sdfat_readdir
350 static int sdfat_iterate(struct file
*filp
, struct dir_context
*ctx
)
352 struct inode
*inode
= filp
->f_path
.dentry
->d_inode
;
353 struct super_block
*sb
= inode
->i_sb
;
354 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
355 FS_INFO_T
*fsi
= &(sbi
->fsi
);
357 DENTRY_NAMEBUF_T
*nb
= &(de
.NameBuf
);
360 int err
= 0, fake_offset
= 0;
362 sdfat_init_namebuf(nb
);
366 if ((fsi
->vol_type
== EXFAT
) || (inode
->i_ino
== SDFAT_ROOT_INO
)) {
367 if (!dir_emit_dots(filp
, ctx
))
369 if (ctx
->pos
== ITER_POS_FILLED_DOTS
) {
374 if (cpos
& (DENTRY_SIZE
- 1)) {
379 /* name buffer should be allocated before use */
380 err
= sdfat_alloc_namebuf(nb
);
384 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
385 SDFAT_I(inode
)->fid
.rwoffset
= cpos
>> DENTRY_SIZE_BITS
;
387 if (cpos
>= SDFAT_I(inode
)->fid
.size
)
390 err
= fsapi_readdir(inode
, &de
);
392 // at least we tried to read a sector
393 // move cpos to next sector position (should be aligned)
395 cpos
+= 1 << (sb
->s_blocksize_bits
);
396 cpos
&= ~((u32
)sb
->s_blocksize
-1);
403 cpos
= SDFAT_I(inode
)->fid
.rwoffset
<< DENTRY_SIZE_BITS
;
408 if (!memcmp(nb
->sfn
, DOS_CUR_DIR_NAME
, DOS_NAME_LENGTH
)) {
410 } else if (!memcmp(nb
->sfn
, DOS_PAR_DIR_NAME
, DOS_NAME_LENGTH
)) {
411 inum
= parent_ino(filp
->f_path
.dentry
);
413 loff_t i_pos
= ((loff_t
) SDFAT_I(inode
)->fid
.start_clu
<< 32) |
414 ((SDFAT_I(inode
)->fid
.rwoffset
-1) & 0xffffffff);
415 struct inode
*tmp
= sdfat_iget(sb
, i_pos
);
421 inum
= iunique(sb
, SDFAT_ROOT_INO
);
425 /* Before calling dir_emit(), sb_lock should be released.
426 * Because page fault can occur in dir_emit() when the size of buffer given
427 * from user is larger than one page size
430 if (!dir_emit(ctx
, nb
->lfn
, strlen(nb
->lfn
), inum
,
431 (de
.Attr
& ATTR_SUBDIR
) ? DT_DIR
: DT_REG
))
439 if (!cpos
&& fake_offset
)
440 cpos
= ITER_POS_FILLED_DOTS
;
446 * To improve performance, free namebuf after unlock sb_lock.
447 * If namebuf is not allocated, this function do nothing
449 sdfat_free_namebuf(nb
);
452 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
453 static inline sector_t
__sdfat_bio_sector(struct bio
*bio
)
455 return bio
->bi_sector
;
458 static inline void __sdfat_set_bio_iterate(struct bio
*bio
, sector_t sector
,
459 unsigned int size
, unsigned int idx
, unsigned int done
)
461 bio
->bi_sector
= sector
;
463 bio
->bi_size
= size
; //PAGE_SIZE;
466 static void __sdfat_truncate_pagecache(struct inode
*inode
,
467 loff_t to
, loff_t newsize
)
469 truncate_pagecache(inode
, to
, newsize
);
472 static int sdfat_d_hash(const struct dentry
*dentry
,
473 const struct inode
*inode
, struct qstr
*qstr
)
475 return __sdfat_d_hash(dentry
, qstr
);
478 static int sdfat_d_hashi(const struct dentry
*dentry
,
479 const struct inode
*inode
, struct qstr
*qstr
)
481 return __sdfat_d_hashi(dentry
, qstr
);
484 static int sdfat_readdir(struct file
*filp
, void *dirent
, filldir_t filldir
)
486 struct inode
*inode
= filp
->f_path
.dentry
->d_inode
;
487 struct super_block
*sb
= inode
->i_sb
;
488 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
489 FS_INFO_T
*fsi
= &(sbi
->fsi
);
491 DENTRY_NAMEBUF_T
*nb
= &(de
.NameBuf
);
494 int err
= 0, fake_offset
= 0;
496 sdfat_init_namebuf(nb
);
500 /* Fake . and .. for the root directory. */
501 if ((fsi
->vol_type
== EXFAT
) || (inode
->i_ino
== SDFAT_ROOT_INO
)) {
502 while (cpos
< ITER_POS_FILLED_DOTS
) {
503 if (inode
->i_ino
== SDFAT_ROOT_INO
)
504 inum
= SDFAT_ROOT_INO
;
507 else /* (cpos == 1) */
508 inum
= parent_ino(filp
->f_path
.dentry
);
510 if (filldir(dirent
, "..", cpos
+1, cpos
, inum
, DT_DIR
) < 0)
515 if (cpos
== ITER_POS_FILLED_DOTS
) {
520 if (cpos
& (DENTRY_SIZE
- 1)) {
525 /* name buffer should be allocated before use */
526 err
= sdfat_alloc_namebuf(nb
);
530 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
531 SDFAT_I(inode
)->fid
.rwoffset
= cpos
>> DENTRY_SIZE_BITS
;
533 if (cpos
>= SDFAT_I(inode
)->fid
.size
)
536 err
= fsapi_readdir(inode
, &de
);
538 // at least we tried to read a sector
539 // move cpos to next sector position (should be aligned)
541 cpos
+= 1 << (sb
->s_blocksize_bits
);
542 cpos
&= ~((u32
)sb
->s_blocksize
-1);
549 cpos
= SDFAT_I(inode
)->fid
.rwoffset
<< DENTRY_SIZE_BITS
;
554 if (!memcmp(nb
->sfn
, DOS_CUR_DIR_NAME
, DOS_NAME_LENGTH
)) {
556 } else if (!memcmp(nb
->sfn
, DOS_PAR_DIR_NAME
, DOS_NAME_LENGTH
)) {
557 inum
= parent_ino(filp
->f_path
.dentry
);
559 loff_t i_pos
= ((loff_t
) SDFAT_I(inode
)->fid
.start_clu
<< 32) |
560 ((SDFAT_I(inode
)->fid
.rwoffset
-1) & 0xffffffff);
561 struct inode
*tmp
= sdfat_iget(sb
, i_pos
);
567 inum
= iunique(sb
, SDFAT_ROOT_INO
);
571 /* Before calling dir_emit(), sb_lock should be released.
572 * Because page fault can occur in dir_emit() when the size of buffer given
573 * from user is larger than one page size
576 if (filldir(dirent
, nb
->lfn
, strlen(nb
->lfn
), cpos
, inum
,
577 (de
.Attr
& ATTR_SUBDIR
) ? DT_DIR
: DT_REG
) < 0)
585 if (!cpos
&& fake_offset
)
586 cpos
= ITER_POS_FILLED_DOTS
;
592 * To improve performance, free namebuf after unlock sb_lock.
593 * If namebuf is not allocated, this function do nothing
595 sdfat_free_namebuf(nb
);
601 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
603 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) */
604 static inline struct inode
*file_inode(const struct file
*f
)
606 return f
->f_dentry
->d_inode
;
611 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
612 static inline int __is_sb_dirty(struct super_block
*sb
)
614 return SDFAT_SB(sb
)->s_dirt
;
617 static inline void __set_sb_clean(struct super_block
*sb
)
619 SDFAT_SB(sb
)->s_dirt
= 0;
622 /* Workqueue wrapper for sdfat_write_super () */
623 static void __write_super_delayed(struct work_struct
*work
)
625 struct sdfat_sb_info
*sbi
;
626 struct super_block
*sb
;
628 sbi
= container_of(work
, struct sdfat_sb_info
, write_super_work
.work
);
631 /* XXX: Is this needed? */
632 if (!sb
|| !down_read_trylock(&sb
->s_umount
)) {
633 DMSG("%s: skip delayed work(write_super).\n", __func__
);
637 DMSG("%s: do delayed_work(write_super).\n", __func__
);
639 spin_lock(&sbi
->work_lock
);
640 sbi
->write_super_queued
= 0;
641 spin_unlock(&sbi
->work_lock
);
643 sdfat_write_super(sb
);
645 up_read(&sb
->s_umount
);
648 static void setup_sdfat_sync_super_wq(struct super_block
*sb
)
650 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
652 mutex_init(&sbi
->s_lock
);
653 spin_lock_init(&sbi
->work_lock
);
654 INIT_DELAYED_WORK(&sbi
->write_super_work
, __write_super_delayed
);
658 static inline bool __cancel_delayed_work_sync(struct sdfat_sb_info
*sbi
)
660 return cancel_delayed_work_sync(&sbi
->write_super_work
);
663 static inline void lock_super(struct super_block
*sb
)
665 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
667 mutex_lock(&sbi
->s_lock
);
670 static inline void unlock_super(struct super_block
*sb
)
672 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
674 mutex_unlock(&sbi
->s_lock
);
677 static int sdfat_revalidate(struct dentry
*dentry
, unsigned int flags
)
679 if (flags
& LOOKUP_RCU
)
682 return __sdfat_revalidate(dentry
);
685 static int sdfat_revalidate_ci(struct dentry
*dentry
, unsigned int flags
)
687 if (flags
& LOOKUP_RCU
)
690 return __sdfat_revalidate_ci(dentry
, flags
);
693 static struct inode
*sdfat_iget(struct super_block
*sb
, loff_t i_pos
)
695 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
696 struct sdfat_inode_info
*info
;
697 struct hlist_head
*head
= sbi
->inode_hashtable
+ sdfat_hash(i_pos
);
698 struct inode
*inode
= NULL
;
700 spin_lock(&sbi
->inode_hash_lock
);
701 hlist_for_each_entry(info
, head
, i_hash_fat
) {
702 BUG_ON(info
->vfs_inode
.i_sb
!= sb
);
704 if (i_pos
!= info
->i_pos
)
706 inode
= igrab(&info
->vfs_inode
);
710 spin_unlock(&sbi
->inode_hash_lock
);
713 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0) */
714 static inline int __is_sb_dirty(struct super_block
*sb
)
719 static inline void __set_sb_clean(struct super_block
*sb
)
724 static void setup_sdfat_sync_super_wq(struct super_block
*sb
)
726 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
731 static inline bool __cancel_delayed_work_sync(struct sdfat_sb_info
*sbi
)
/* Pre-3.7 name for the final inode teardown step. */
static inline void clear_inode(struct inode *inode)
{
	end_writeback(inode);
}
742 static int sdfat_revalidate(struct dentry
*dentry
, struct nameidata
*nd
)
744 if (nd
&& nd
->flags
& LOOKUP_RCU
)
747 return __sdfat_revalidate(dentry
);
750 static int sdfat_revalidate_ci(struct dentry
*dentry
, struct nameidata
*nd
)
752 if (nd
&& nd
->flags
& LOOKUP_RCU
)
755 return __sdfat_revalidate_ci(dentry
, nd
? nd
->flags
: 0);
759 static struct inode
*sdfat_iget(struct super_block
*sb
, loff_t i_pos
)
761 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
762 struct sdfat_inode_info
*info
;
763 struct hlist_node
*node
;
764 struct hlist_head
*head
= sbi
->inode_hashtable
+ sdfat_hash(i_pos
);
765 struct inode
*inode
= NULL
;
767 spin_lock(&sbi
->inode_hash_lock
);
768 hlist_for_each_entry(info
, node
, head
, i_hash_fat
) {
769 BUG_ON(info
->vfs_inode
.i_sb
!= sb
);
771 if (i_pos
!= info
->i_pos
)
773 inode
= igrab(&info
->vfs_inode
);
777 spin_unlock(&sbi
->inode_hash_lock
);
783 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
784 static struct dentry
*sdfat_lookup(struct inode
*dir
, struct dentry
*dentry
,
787 return __sdfat_lookup(dir
, dentry
);
789 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) */
790 static struct dentry
*sdfat_lookup(struct inode
*dir
, struct dentry
*dentry
,
791 struct nameidata
*nd
)
793 return __sdfat_lookup(dir
, dentry
);
798 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
800 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) */
801 #define GLOBAL_ROOT_UID (0)
802 #define GLOBAL_ROOT_GID (0)
804 static inline bool uid_eq(uid_t left
, uid_t right
)
806 return left
== right
;
809 static inline bool gid_eq(gid_t left
, gid_t right
)
811 return left
== right
;
814 static inline uid_t
from_kuid_munged(struct user_namespace
*to
, uid_t kuid
)
819 static inline gid_t
from_kgid_munged(struct user_namespace
*to
, gid_t kgid
)
824 static inline uid_t
make_kuid(struct user_namespace
*from
, uid_t uid
)
829 static inline gid_t
make_kgid(struct user_namespace
*from
, gid_t gid
)
836 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
837 static struct dentry
*__d_make_root(struct inode
*root_inode
)
839 return d_make_root(root_inode
);
842 static void __sdfat_do_truncate(struct inode
*inode
, loff_t old
, loff_t
new)
844 down_write(&SDFAT_I(inode
)->truncate_lock
);
845 truncate_setsize(inode
, new);
846 sdfat_truncate(inode
, old
);
847 up_write(&SDFAT_I(inode
)->truncate_lock
);
850 static sector_t
sdfat_aop_bmap(struct address_space
*mapping
, sector_t block
)
854 /* sdfat_get_cluster() assumes the requested blocknr isn't truncated. */
855 down_read(&SDFAT_I(mapping
->host
)->truncate_lock
);
856 blocknr
= generic_block_bmap(mapping
, block
, sdfat_get_block
);
857 up_read(&SDFAT_I(mapping
->host
)->truncate_lock
);
861 static int sdfat_mkdir(struct inode
*dir
, struct dentry
*dentry
, umode_t mode
)
863 return __sdfat_mkdir(dir
, dentry
);
866 static int sdfat_show_options(struct seq_file
*m
, struct dentry
*root
)
868 return __sdfat_show_options(m
, root
->d_sb
);
870 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) */
871 static inline void set_nlink(struct inode
*inode
, unsigned int nlink
)
873 inode
->i_nlink
= nlink
;
876 static struct dentry
*__d_make_root(struct inode
*root_inode
)
878 return d_alloc_root(root_inode
);
881 static void __sdfat_do_truncate(struct inode
*inode
, loff_t old
, loff_t
new)
883 truncate_setsize(inode
, new);
884 sdfat_truncate(inode
, old
);
887 static sector_t
sdfat_aop_bmap(struct address_space
*mapping
, sector_t block
)
891 /* sdfat_get_cluster() assumes the requested blocknr isn't truncated. */
892 down_read(&mapping
->host
->i_alloc_sem
);
893 blocknr
= generic_block_bmap(mapping
, block
, sdfat_get_block
);
894 up_read(&mapping
->host
->i_alloc_sem
);
898 static int sdfat_mkdir(struct inode
*dir
, struct dentry
*dentry
, int mode
)
900 return __sdfat_mkdir(dir
, dentry
);
903 static int sdfat_show_options(struct seq_file
*m
, struct vfsmount
*mnt
)
905 return __sdfat_show_options(m
, mnt
->mnt_sb
);
910 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
911 #define __sdfat_generic_file_fsync(filp, start, end, datasync) \
912 generic_file_fsync(filp, start, end, datasync)
914 static int sdfat_file_fsync(struct file
*filp
, loff_t start
, loff_t end
, int datasync
)
916 return __sdfat_file_fsync(filp
, start
, end
, datasync
);
918 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
919 #define __sdfat_generic_file_fsync(filp, start, end, datasync) \
920 generic_file_fsync(filp, datasync)
921 static int sdfat_file_fsync(struct file
*filp
, int datasync
)
923 return __sdfat_file_fsync(filp
, 0, 0, datasync
);
927 /*************************************************************************
928 * MORE FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
929 *************************************************************************/
930 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
931 static void sdfat_writepage_end_io(struct bio
*bio
)
933 __sdfat_writepage_end_io(bio
, blk_status_to_errno(bio
->bi_status
));
935 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
936 static void sdfat_writepage_end_io(struct bio
*bio
)
938 __sdfat_writepage_end_io(bio
, bio
->bi_error
);
940 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) */
941 static void sdfat_writepage_end_io(struct bio
*bio
, int err
)
943 if (test_bit(BIO_UPTODATE
, &bio
->bi_flags
))
945 __sdfat_writepage_end_io(bio
, err
);
950 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
951 static int sdfat_cmp(const struct dentry
*dentry
,
952 unsigned int len
, const char *str
, const struct qstr
*name
)
954 return __sdfat_cmp(dentry
, len
, str
, name
);
957 static int sdfat_cmpi(const struct dentry
*dentry
,
958 unsigned int len
, const char *str
, const struct qstr
*name
)
960 return __sdfat_cmpi(dentry
, len
, str
, name
);
962 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
963 static int sdfat_cmp(const struct dentry
*parent
, const struct dentry
*dentry
,
964 unsigned int len
, const char *str
, const struct qstr
*name
)
966 return __sdfat_cmp(dentry
, len
, str
, name
);
969 static int sdfat_cmpi(const struct dentry
*parent
, const struct dentry
*dentry
,
970 unsigned int len
, const char *str
, const struct qstr
*name
)
972 return __sdfat_cmpi(dentry
, len
, str
, name
);
975 static int sdfat_cmp(const struct dentry
*parent
, const struct inode
*pinode
,
976 const struct dentry
*dentry
, const struct inode
*inode
,
977 unsigned int len
, const char *str
, const struct qstr
*name
)
979 return __sdfat_cmp(dentry
, len
, str
, name
);
982 static int sdfat_cmpi(const struct dentry
*parent
, const struct inode
*pinode
,
983 const struct dentry
*dentry
, const struct inode
*inode
,
984 unsigned int len
, const char *str
, const struct qstr
*name
)
986 return __sdfat_cmpi(dentry
, len
, str
, name
);
991 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
992 static ssize_t
sdfat_direct_IO(struct kiocb
*iocb
, struct iov_iter
*iter
)
994 struct file
*file
= iocb
->ki_filp
;
995 struct address_space
*mapping
= file
->f_mapping
;
996 struct inode
*inode
= mapping
->host
;
997 size_t count
= iov_iter_count(iter
);
998 int rw
= iov_iter_rw(iter
);
999 loff_t offset
= iocb
->ki_pos
;
1001 return __sdfat_direct_IO(rw
, iocb
, inode
,
1002 (void *)iter
, offset
, count
, 0 /* UNUSED */);
1004 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
1005 static ssize_t
sdfat_direct_IO(struct kiocb
*iocb
,
1006 struct iov_iter
*iter
,
1009 struct file
*file
= iocb
->ki_filp
;
1010 struct address_space
*mapping
= file
->f_mapping
;
1011 struct inode
*inode
= mapping
->host
;
1012 size_t count
= iov_iter_count(iter
);
1013 int rw
= iov_iter_rw(iter
);
1015 return __sdfat_direct_IO(rw
, iocb
, inode
,
1016 (void *)iter
, offset
, count
, 0 /* UNUSED */);
1018 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
1019 static ssize_t
sdfat_direct_IO(int rw
, struct kiocb
*iocb
,
1020 struct iov_iter
*iter
,
1023 struct file
*file
= iocb
->ki_filp
;
1024 struct address_space
*mapping
= file
->f_mapping
;
1025 struct inode
*inode
= mapping
->host
;
1026 size_t count
= iov_iter_count(iter
);
1028 return __sdfat_direct_IO(rw
, iocb
, inode
,
1029 (void *)iter
, offset
, count
, 0 /* UNUSED */);
1032 static ssize_t
sdfat_direct_IO(int rw
, struct kiocb
*iocb
,
1033 const struct iovec
*iov
, loff_t offset
, unsigned long nr_segs
)
1035 struct file
*file
= iocb
->ki_filp
;
1036 struct address_space
*mapping
= file
->f_mapping
;
1037 struct inode
*inode
= mapping
->host
;
1038 size_t count
= iov_length(iov
, nr_segs
);
1040 return __sdfat_direct_IO(rw
, iocb
, inode
,
1041 (void *)iov
, offset
, count
, nr_segs
);
1046 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
1047 static inline ssize_t
__sdfat_blkdev_direct_IO(int unused
, struct kiocb
*iocb
,
1048 struct inode
*inode
, void *iov_u
, loff_t unused_1
,
1049 unsigned long nr_segs
)
1051 struct iov_iter
*iter
= (struct iov_iter
*)iov_u
;
1053 return blockdev_direct_IO(iocb
, inode
, iter
, sdfat_get_block
);
1055 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
1056 static inline ssize_t
__sdfat_blkdev_direct_IO(int unused
, struct kiocb
*iocb
,
1057 struct inode
*inode
, void *iov_u
, loff_t offset
,
1058 unsigned long nr_segs
)
1060 struct iov_iter
*iter
= (struct iov_iter
*)iov_u
;
1062 return blockdev_direct_IO(iocb
, inode
, iter
, offset
, sdfat_get_block
);
1064 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
1065 static inline ssize_t
__sdfat_blkdev_direct_IO(int rw
, struct kiocb
*iocb
,
1066 struct inode
*inode
, void *iov_u
, loff_t offset
,
1067 unsigned long nr_segs
)
1069 struct iov_iter
*iter
= (struct iov_iter
*)iov_u
;
1071 return blockdev_direct_IO(rw
, iocb
, inode
, iter
,
1072 offset
, sdfat_get_block
);
1074 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
1075 static inline ssize_t
__sdfat_blkdev_direct_IO(int rw
, struct kiocb
*iocb
,
1076 struct inode
*inode
, void *iov_u
, loff_t offset
,
1077 unsigned long nr_segs
)
1079 const struct iovec
*iov
= (const struct iovec
*)iov_u
;
1081 return blockdev_direct_IO(rw
, iocb
, inode
, iov
,
1082 offset
, nr_segs
, sdfat_get_block
);
1085 static inline ssize_t
__sdfat_blkdev_direct_IO(int rw
, struct kiocb
*iocb
,
1086 struct inode
*inode
, void *iov_u
, loff_t offset
,
1087 unsigned long nr_segs
)
1089 const struct iovec
*iov
= (const struct iovec
*)iov_u
;
1091 return blockdev_direct_IO(rw
, iocb
, inode
, inode
->i_sb
->s_bdev
, iov
,
1092 offset
, nr_segs
, sdfat_get_block
, NULL
);
1097 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
1098 static const char *sdfat_follow_link(struct dentry
*dentry
, struct inode
*inode
, struct delayed_call
*done
)
1100 struct sdfat_inode_info
*ei
= SDFAT_I(inode
);
1102 return (char *)(ei
->target
);
1104 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
1105 static const char *sdfat_follow_link(struct dentry
*dentry
, void **cookie
)
1107 struct sdfat_inode_info
*ei
= SDFAT_I(dentry
->d_inode
);
1109 return *cookie
= (char *)(ei
->target
);
1112 static void *sdfat_follow_link(struct dentry
*dentry
, struct nameidata
*nd
)
1114 struct sdfat_inode_info
*ei
= SDFAT_I(dentry
->d_inode
);
1116 nd_set_link(nd
, (char *)(ei
->target
));
1122 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
1123 static int sdfat_create(struct inode
*dir
, struct dentry
*dentry
, umode_t mode
,
1126 return __sdfat_create(dir
, dentry
);
1128 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
1129 static int sdfat_create(struct inode
*dir
, struct dentry
*dentry
, umode_t mode
,
1130 struct nameidata
*nd
)
1132 return __sdfat_create(dir
, dentry
);
1135 static int sdfat_create(struct inode
*dir
, struct dentry
*dentry
, int mode
,
1136 struct nameidata
*nd
)
1138 return __sdfat_create(dir
, dentry
);
/*************************************************************************
 * WRAP FUNCTIONS FOR DEBUGGING
 *************************************************************************/
#ifdef CONFIG_SDFAT_TRACE_SB_LOCK
static inline void __lock_super(struct super_block *sb)
{
	lock_super(sb);
	__lock_jiffies = jiffies;
}

static inline void __unlock_super(struct super_block *sb)
{
	int time = ((jiffies - __lock_jiffies) * 1000 / HZ);
	/* FIXME : error message should be modified */
	/* NOTE(review): 10 ms threshold reconstructed — confirm against
	 * the original tracing code. */
	if (time > 10)
		EMSG("lock_super in %s (%d ms)\n", __func__, time);

	unlock_super(sb);
}
#else /* CONFIG_SDFAT_TRACE_SB_LOCK */
static inline void __lock_super(struct super_block *sb)
{
	lock_super(sb);
}

static inline void __unlock_super(struct super_block *sb)
{
	unlock_super(sb);
}
#endif /* CONFIG_SDFAT_TRACE_SB_LOCK */
1174 /*************************************************************************
1176 *************************************************************************/
1177 static inline loff_t
sdfat_make_i_pos(FILE_ID_T
*fid
)
1179 return ((loff_t
) fid
->dir
.dir
<< 32) | (fid
->entry
& 0xffffffff);
1182 /*======================================================================*/
1183 /* Directory Entry Name Buffer Operations */
1184 /*======================================================================*/
/*
 * Directory-entry name buffer helpers.  A DENTRY_NAMEBUF_T carries one
 * __getname() allocation split into a long-filename (lfn) half and a
 * short-filename (sfn) half, each MAX_VFSNAME_BUF_SIZE bytes.
 */
1185 static void sdfat_init_namebuf(DENTRY_NAMEBUF_T
*nb
)
/*
 * Allocate the name buffer: a single __getname() allocation backs both
 * halves; sfn points MAX_VFSNAME_BUF_SIZE bytes past lfn.
 */
1193 static int sdfat_alloc_namebuf(DENTRY_NAMEBUF_T
*nb
)
1195 nb
->lfn
= __getname();
1198 nb
->sfn
= nb
->lfn
+ MAX_VFSNAME_BUF_SIZE
;
1199 nb
->lfnbuf_len
= MAX_VFSNAME_BUF_SIZE
;
1200 nb
->sfnbuf_len
= MAX_VFSNAME_BUF_SIZE
;
/* Release the buffer (if any) and reset the fields via init. */
1204 static void sdfat_free_namebuf(DENTRY_NAMEBUF_T
*nb
)
1210 sdfat_init_namebuf(nb
);
1213 /*======================================================================*/
1214 /* Directory Entry Operations */
1215 /*======================================================================*/
/*
 * Dentry-revalidation gate: while a create/rename is in flight the
 * dentry's d_fsdata is set to the LOCKED cookie so that
 * __check_dstate_locked() can detect the in-progress state.
 */
1216 #define SDFAT_DSTATE_LOCKED (void *)(0xCAFE2016)
1217 #define SDFAT_DSTATE_UNLOCKED (void *)(0x00000000)
/* Mark @dentry as in-progress (d_fsdata = LOCKED) under d_lock. */
1219 static inline void __lock_d_revalidate(struct dentry
*dentry
)
1221 spin_lock(&dentry
->d_lock
);
1222 dentry
->d_fsdata
= SDFAT_DSTATE_LOCKED
;
1223 spin_unlock(&dentry
->d_lock
);
/* Clear the in-progress mark (d_fsdata = UNLOCKED) under d_lock. */
1226 static inline void __unlock_d_revalidate(struct dentry
*dentry
)
1228 spin_lock(&dentry
->d_lock
);
1229 dentry
->d_fsdata
= SDFAT_DSTATE_UNLOCKED
;
1230 spin_unlock(&dentry
->d_lock
);
1233 /* __check_dstate_locked requires dentry->d_lock */
/* Test whether d_fsdata carries the LOCKED cookie; caller holds d_lock. */
1234 static inline int __check_dstate_locked(struct dentry
*dentry
)
1236 if (dentry
->d_fsdata
== SDFAT_DSTATE_LOCKED
)
1243 * If new entry was created in the parent, it could create the 8.3
1244 * alias (the shortname of logname). So, the parent may have the
1245 * negative-dentry which matches the created 8.3 alias.
1247 * If it happened, the negative dentry isn't actually negative
1248 * anymore. So, drop it.
1250 static int __sdfat_revalidate_common(struct dentry
*dentry
)
1254 spin_lock(&dentry
->d_lock
);
1255 if ((!dentry
->d_inode
) && (!__check_dstate_locked(dentry
) &&
1257 (unsigned long)inode_peek_iversion(dentry
->d_parent
->d_inode
)))) {
1260 spin_unlock(&dentry
->d_lock
);
1264 static int __sdfat_revalidate(struct dentry
*dentry
)
1266 /* This is not negative dentry. Always valid. */
1267 if (dentry
->d_inode
)
1269 return __sdfat_revalidate_common(dentry
);
1272 static int __sdfat_revalidate_ci(struct dentry
*dentry
, unsigned int flags
)
1275 * This is not negative dentry. Always valid.
1277 * Note, rename() to existing directory entry will have ->d_inode,
1278 * and will use existing name which isn't specified name by user.
1280 * We may be able to drop this positive dentry here. But dropping
1281 * positive dentry isn't good idea. So it's unsupported like
1282 * rename("filename", "FILENAME") for now.
1284 if (dentry
->d_inode
)
1286 #if 0 /* Blocked below code for lookup_one_len() called by stackable FS */
1288 * This may be nfsd (or something), anyway, we can't see the
1289 * intent of this. So, since this can be for creation, drop it.
1295 * Drop the negative dentry, in order to make sure to use the
1296 * case sensitive name which is specified by user if this is
1299 if (flags
& (LOOKUP_CREATE
| LOOKUP_RENAME_TARGET
))
1301 return __sdfat_revalidate_common(dentry
);
1305 /* returns the length of a struct qstr, ignoring trailing dots */
/* Strip trailing '.' characters from a (len, name) pair. */
1306 static unsigned int __sdfat_striptail_len(unsigned int len
, const char *name
)
1308 while (len
&& name
[len
- 1] == '.')
/* qstr convenience wrapper around __sdfat_striptail_len(). */
1313 static unsigned int sdfat_striptail_len(const struct qstr
*qstr
)
1315 return __sdfat_striptail_len(qstr
->len
, qstr
->name
);
1319 * Compute the hash for the sdfat name corresponding to the dentry.
1320 * Note: if the name is invalid, we leave the hash code unchanged so
1321 * that the existing dentry can be used. The sdfat fs routines will
1322 * return ENOENT or EINVAL as appropriate.
1324 static int __sdfat_d_hash(const struct dentry
*dentry
, struct qstr
*qstr
)
1326 unsigned int len
= sdfat_striptail_len(qstr
);
1328 qstr
->hash
= __sdfat_full_name_hash(dentry
, qstr
->name
, len
);
1333 * Compute the hash for the sdfat name corresponding to the dentry.
1334 * Note: if the name is invalid, we leave the hash code unchanged so
1335 * that the existing dentry can be used. The sdfat fs routines will
1336 * return ENOENT or EINVAL as appropriate.
1338 static int __sdfat_d_hashi(const struct dentry
*dentry
, struct qstr
*qstr
)
1340 struct nls_table
*t
= SDFAT_SB(dentry
->d_sb
)->nls_io
;
1341 const unsigned char *name
;
1346 len
= sdfat_striptail_len(qstr
);
1348 hash
= __sdfat_init_name_hash(dentry
);
1350 hash
= partial_name_hash(nls_tolower(t
, *name
++), hash
);
1351 qstr
->hash
= end_name_hash(hash
);
1357 * Case sensitive compare of two sdfat names.
1359 static int __sdfat_cmp(const struct dentry
*dentry
, unsigned int len
,
1360 const char *str
, const struct qstr
*name
)
1362 unsigned int alen
, blen
;
1364 /* A filename cannot end in '.' or we treat it like it has none */
1365 alen
= sdfat_striptail_len(name
);
1366 blen
= __sdfat_striptail_len(len
, str
);
1368 if (strncmp(name
->name
, str
, alen
) == 0)
1375 * Case insensitive compare of two sdfat names.
1377 static int __sdfat_cmpi(const struct dentry
*dentry
, unsigned int len
,
1378 const char *str
, const struct qstr
*name
)
1380 struct nls_table
*t
= SDFAT_SB(dentry
->d_sb
)->nls_io
;
1381 unsigned int alen
, blen
;
1383 /* A filename cannot end in '.' or we treat it like it has none */
1384 alen
= sdfat_striptail_len(name
);
1385 blen
= __sdfat_striptail_len(len
, str
);
1387 if (nls_strnicmp(t
, name
->name
, str
, alen
) == 0)
/* Case-sensitive dentry operations (default lookup behaviour). */
1393 static const struct dentry_operations sdfat_dentry_ops
= {
1394 .d_revalidate
= sdfat_revalidate
,
1395 .d_hash
= sdfat_d_hash
,
1396 .d_compare
= sdfat_cmp
,
/* Case-insensitive dentry operations (nls_io-based hash/compare). */
1399 static const struct dentry_operations sdfat_ci_dentry_ops
= {
1400 .d_revalidate
= sdfat_revalidate_ci
,
1401 .d_hash
= sdfat_d_hashi
,
1402 .d_compare
= sdfat_cmpi
,
1405 #ifdef CONFIG_SDFAT_DFR
1406 /*----------------------------------------------------------------------*/
1407 /* Defragmentation related */
1408 /*----------------------------------------------------------------------*/
1410 * @fn defrag_cleanup_reqs
1411 * @brief clean-up defrag info depending on error flag
1413 * @param sb super block
1414 * @param error error flag
1416 static void defrag_cleanup_reqs(INOUT
struct super_block
*sb
, IN
int error
)
1418 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
1419 struct defrag_info
*sb_dfr
= &(sbi
->dfr_info
);
1420 struct defrag_info
*ino_dfr
= NULL
, *tmp
= NULL
;
1421 /* sdfat patch 0.96 : sbi->dfr_info crash problem */
1424 /* Clean-up ino_dfr */
1426 list_for_each_entry_safe(ino_dfr
, tmp
, &sb_dfr
->entry
, entry
) {
1427 struct inode
*inode
= &(container_of(ino_dfr
, struct sdfat_inode_info
, dfr_info
)->vfs_inode
);
1429 mutex_lock(&ino_dfr
->lock
);
1431 atomic_set(&ino_dfr
->stat
, DFR_INO_STAT_IDLE
);
1433 list_del(&ino_dfr
->entry
);
1435 ino_dfr
->chunks
= NULL
;
1436 ino_dfr
->nr_chunks
= 0;
1437 INIT_LIST_HEAD(&ino_dfr
->entry
);
1439 BUG_ON(!mutex_is_locked(&ino_dfr
->lock
));
1440 mutex_unlock(&ino_dfr
->lock
);
1446 /* Clean-up sb_dfr */
1447 sb_dfr
->chunks
= NULL
;
1448 sb_dfr
->nr_chunks
= 0;
1449 INIT_LIST_HEAD(&sb_dfr
->entry
);
1451 /* Clear dfr_new_clus page */
1452 memset(sbi
->dfr_new_clus
, 0, PAGE_SIZE
);
1453 sbi
->dfr_new_idx
= 1;
1454 memset(sbi
->dfr_page_wb
, 0, PAGE_SIZE
);
1456 sbi
->dfr_hint_clus
= sbi
->dfr_hint_idx
= sbi
->dfr_reserved_clus
= 0;
1462 * @fn defrag_validate_pages
1463 * @brief validate and mark dirty for victiim pages
1464 * @return 0 on success, -errno otherwise
1465 * @param inode inode
1466 * @param chunk given chunk
1467 * @remark protected by inode_lock and super_lock
1470 defrag_validate_pages(
1471 IN
struct inode
*inode
,
1472 IN
struct defrag_chunk_info
*chunk
)
1474 struct super_block
*sb
= inode
->i_sb
;
1475 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
1476 struct page
*page
= NULL
;
1477 unsigned int i_size
= 0, page_off
= 0, page_nr
= 0;
1478 int buf_i
= 0, i
= 0, err
= 0;
1480 i_size
= i_size_read(inode
);
1481 page_off
= chunk
->f_clus
* PAGES_PER_CLUS(sb
);
1482 page_nr
= (i_size
/ PAGE_SIZE
) + ((i_size
% PAGE_SIZE
) ? 1 : 0);
1483 if ((i_size
<= 0) || (page_nr
<= 0)) {
1484 dfr_err("inode %p, i_size %d, page_nr %d", inode
, i_size
, page_nr
);
1489 * and check its dirty/writeback/mapped state
1492 i
< min((int)(page_nr
- page_off
), (int)(chunk
->nr_clus
* PAGES_PER_CLUS(sb
)));
1494 page
= find_get_page(inode
->i_mapping
, page_off
+ i
);
1496 if (!trylock_page(page
)) {
1502 dfr_debug("get/lock_page() failed, index %d", i
);
1507 sbi
->dfr_pagep
[buf_i
++] = page
;
1508 if (PageError(page
) || !PageUptodate(page
) || PageDirty(page
) ||
1509 PageWriteback(page
) || page_mapped(page
)) {
1510 dfr_debug("page %p, err %d, uptodate %d, "
1511 "dirty %d, wb %d, mapped %d",
1512 page
, PageError(page
), PageUptodate(page
),
1513 PageDirty(page
), PageWriteback(page
),
1519 set_bit((page
->index
& (PAGES_PER_CLUS(sb
) - 1)),
1520 (volatile unsigned long *)&(sbi
->dfr_page_wb
[chunk
->new_idx
+ i
/ PAGES_PER_CLUS(sb
)]));
1526 * All pages in the chunks are valid.
1528 i_size
-= (chunk
->f_clus
* (sbi
->fsi
.cluster_size
));
1529 BUG_ON(((i_size
/ PAGE_SIZE
) + ((i_size
% PAGE_SIZE
) ? 1 : 0)) != (page_nr
- page_off
));
1531 for (i
= 0; i
< buf_i
; i
++) {
1532 struct buffer_head
*bh
= NULL
, *head
= NULL
;
1535 page
= sbi
->dfr_pagep
[i
];
1538 /* Mark dirty in page */
1539 set_page_dirty(page
);
1540 mark_page_accessed(page
);
1542 /* Attach empty BHs */
1543 if (!page_has_buffers(page
))
1544 create_empty_buffers(page
, 1 << inode
->i_blkbits
, 0);
1546 /* Mark dirty in BHs */
1547 bh
= head
= page_buffers(page
);
1548 BUG_ON(!bh
&& !i_size
);
1550 if ((bh_idx
>= 1) && (bh_idx
>= (i_size
>> inode
->i_blkbits
))) {
1551 clear_buffer_dirty(bh
);
1553 if (PageUptodate(page
))
1554 if (!buffer_uptodate(bh
))
1555 set_buffer_uptodate(bh
);
1557 /* Set this bh as delay */
1559 set_buffer_delay(bh
);
1561 mark_buffer_dirty(bh
);
1565 bh
= bh
->b_this_page
;
1566 } while (bh
!= head
);
1568 /* Mark this page accessed */
1569 mark_page_accessed(page
);
1571 i_size
-= PAGE_SIZE
;
1575 /* Unlock and put refs for pages */
1576 for (i
= 0; i
< buf_i
; i
++) {
1577 BUG_ON(!sbi
->dfr_pagep
[i
]);
1578 unlock_page(sbi
->dfr_pagep
[i
]);
1579 put_page(sbi
->dfr_pagep
[i
]);
/*
 * NOTE(review): sizeof(PAGE_SIZE) is the size of the integer constant
 * (4 or 8 bytes), NOT PAGE_SIZE bytes, so this memset clears only the
 * first few bytes of the dfr_pagep array.  The matching allocation in
 * __alloc_dfr_mem_if_required() is sizeof(struct page *) *
 * PAGES_PER_AU(sb); the intended length here is presumably the same —
 * confirm and fix upstream.
 */
1581 memset(sbi
->dfr_pagep
, 0, sizeof(PAGE_SIZE
));
1588 * @fn defrag_validate_reqs
1589 * @brief validate defrag requests
1590 * @return negative if all requests not valid, 0 otherwise
1591 * @param sb super block
1592 * @param chunks given chunks
1595 defrag_validate_reqs(
1596 IN
struct super_block
*sb
,
1597 INOUT
struct defrag_chunk_info
*chunks
)
1599 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
1600 struct defrag_info
*sb_dfr
= &(sbi
->dfr_info
);
1601 int i
= 0, err
= 0, err_cnt
= 0;
1603 /* Validate all reqs */
1604 for (i
= REQ_HEADER_IDX
+ 1; i
< sb_dfr
->nr_chunks
; i
++) {
1605 struct defrag_chunk_info
*chunk
= NULL
;
1606 struct inode
*inode
= NULL
;
1607 struct defrag_info
*ino_dfr
= NULL
;
1613 inode
= sdfat_iget(sb
, chunk
->i_pos
);
1615 dfr_debug("inode not found, i_pos %08llx", chunk
->i_pos
);
1616 chunk
->stat
= DFR_CHUNK_STAT_ERR
;
1623 dfr_debug("req[%d] inode %p, i_pos %08llx, f_clus %d, "
1624 "d_clus %08x, nr %d, prev %08x, next %08x",
1625 i
, inode
, chunk
->i_pos
, chunk
->f_clus
, chunk
->d_clus
,
1626 chunk
->nr_clus
, chunk
->prev_clus
, chunk
->next_clus
);
1628 * Lock ordering: inode_lock -> lock_super
1633 /* Check if enough buffers exist for chunk->new_idx */
1634 if ((sbi
->dfr_new_idx
+ chunk
->nr_clus
) >= (PAGE_SIZE
/ sizeof(int))) {
1635 dfr_err("dfr_new_idx %d, chunk->nr_clus %d",
1636 sbi
->dfr_new_idx
, chunk
->nr_clus
);
1641 /* Reserve clusters for defrag with DA */
1642 err
= fsapi_dfr_reserve_clus(sb
, chunk
->nr_clus
);
1646 /* Check clusters */
1647 err
= fsapi_dfr_validate_clus(inode
, chunk
, 0);
1649 fsapi_dfr_reserve_clus(sb
, 0 - chunk
->nr_clus
);
1650 dfr_debug("Cluster validation: err %d", err
);
1655 err
= defrag_validate_pages(inode
, chunk
);
1657 fsapi_dfr_reserve_clus(sb
, 0 - chunk
->nr_clus
);
1658 dfr_debug("Page validation: err %d", err
);
1662 /* Mark IGNORE flag to victim AU */
1663 if (sbi
->options
.improved_allocation
& SDFAT_ALLOC_SMART
)
1664 fsapi_dfr_mark_ignore(sb
, chunk
->d_clus
);
1666 ino_dfr
= &(SDFAT_I(inode
)->dfr_info
);
1667 mutex_lock(&ino_dfr
->lock
);
1669 /* Update chunk info */
1670 chunk
->stat
= DFR_CHUNK_STAT_REQ
;
1671 chunk
->new_idx
= sbi
->dfr_new_idx
;
1673 /* Update ino_dfr info */
1674 if (list_empty(&(ino_dfr
->entry
))) {
1675 list_add_tail(&ino_dfr
->entry
, &sb_dfr
->entry
);
1676 ino_dfr
->chunks
= chunk
;
1679 ino_dfr
->nr_chunks
++;
1681 atomic_set(&ino_dfr
->stat
, DFR_INO_STAT_REQ
);
1683 BUG_ON(!mutex_is_locked(&ino_dfr
->lock
));
1684 mutex_unlock(&ino_dfr
->lock
);
1686 /* Reserved buffers for chunk->new_idx */
1687 sbi
->dfr_new_idx
+= chunk
->nr_clus
;
1691 chunk
->stat
= DFR_CHUNK_STAT_ERR
;
1696 inode_unlock(inode
);
1699 /* Return error if all chunks are invalid */
1700 if (err_cnt
== sb_dfr
->nr_chunks
- 1) {
1701 dfr_debug("%s failed (err_cnt %d)", __func__
, err_cnt
);
1710 * @fn defrag_check_fs_busy
1711 * @brief check if this module busy
1712 * @return 0 when idle, 1 otherwise
1713 * @param sb super block
1714 * @param reserved_clus # of reserved clusters
1715 * @param queued_pages # of queued pages
1718 defrag_check_fs_busy(
1719 IN
struct super_block
*sb
,
1720 OUT
int *reserved_clus
,
1721 OUT
int *queued_pages
)
/* Snapshot the two busy indicators out of the superblock info. */
1723 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
1726 *reserved_clus
= *queued_pages
= 0;
1729 *reserved_clus
= fsi
->reserved_clusters
;
1730 *queued_pages
= atomic_read(&SDFAT_SB(sb
)->stat_n_pages_queued
);
/* Busy when any clusters are reserved or pages are queued. */
1732 if (*reserved_clus
|| *queued_pages
)
1741 * @fn sdfat_ioctl_defrag_req
1742 * @brief ioctl to send defrag requests
1743 * @return 0 on success, -errno otherwise
1744 * @param inode inode
1745 * @param uarg given requests
1748 sdfat_ioctl_defrag_req(
1749 IN
struct inode
*inode
,
1750 INOUT
unsigned int *uarg
)
1752 struct super_block
*sb
= inode
->i_sb
;
1753 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
1754 struct defrag_info
*sb_dfr
= &(sbi
->dfr_info
);
1755 struct defrag_chunk_header head
;
1756 struct defrag_chunk_info
*chunks
= NULL
;
1757 unsigned int len
= 0;
1759 unsigned long timeout
= 0;
1761 /* Check overlapped defrag */
1762 if (atomic_cmpxchg(&sb_dfr
->stat
, DFR_SB_STAT_IDLE
, DFR_SB_STAT_REQ
)) {
1763 dfr_debug("sb_dfr->stat %d", atomic_read(&sb_dfr
->stat
));
1767 /* Check if defrag required */
1769 if (!fsapi_dfr_check_dfr_required(sb
, NULL
, NULL
, NULL
)) {
1770 dfr_debug("Not enough space left for defrag (err %d)", -ENOSPC
);
1771 atomic_set(&sb_dfr
->stat
, DFR_SB_STAT_IDLE
);
1778 memset(&head
, 0, sizeof(struct defrag_chunk_header
));
1779 err
= copy_from_user(&head
, uarg
, sizeof(struct defrag_chunk_info
));
1782 /* If FS busy, cancel defrag */
1783 if (!(head
.mode
== DFR_MODE_TEST
)) {
1784 int reserved_clus
= 0, queued_pages
= 0;
1786 err
= defrag_check_fs_busy(sb
, &reserved_clus
, &queued_pages
);
1788 dfr_debug("FS busy, cancel defrag (reserved_clus %d, queued_pages %d)",
1789 reserved_clus
, queued_pages
);
1795 /* Total length is saved in the chunk header's nr_chunks field */
1796 len
= head
.nr_chunks
;
1797 ERR_HANDLE2(!len
, err
, -EINVAL
);
1799 dfr_debug("IOC_DFR_REQ started (mode %d, nr_req %d)", head
.mode
, len
- 1);
1800 if (get_order(len
* sizeof(struct defrag_chunk_info
)) > MAX_ORDER
) {
1801 dfr_debug("len %d, sizeof(struct defrag_chunk_info) %lu, MAX_ORDER %d",
1802 len
, sizeof(struct defrag_chunk_info
), MAX_ORDER
);
1806 chunks
= alloc_pages_exact(len
* sizeof(struct defrag_chunk_info
),
1807 GFP_KERNEL
| __GFP_ZERO
);
1808 ERR_HANDLE2(!chunks
, err
, -ENOMEM
)
1810 err
= copy_from_user(chunks
, uarg
, len
* sizeof(struct defrag_chunk_info
));
1813 /* Initialize sb_dfr */
1814 sb_dfr
->chunks
= chunks
;
1815 sb_dfr
->nr_chunks
= len
;
1817 /* Validate reqs & mark defrag/dirty */
1818 err
= defrag_validate_reqs(sb
, sb_dfr
->chunks
);
1821 atomic_set(&sb_dfr
->stat
, DFR_SB_STAT_VALID
);
1823 /* Wait for defrag completion */
1824 if (head
.mode
== DFR_MODE_ONESHOT
)
1826 else if (head
.mode
& DFR_MODE_BACKGROUND
)
1827 timeout
= DFR_DEFAULT_TIMEOUT
;
1829 timeout
= DFR_MIN_TIMEOUT
;
1831 dfr_debug("Wait for completion (timeout %ld)", timeout
);
1832 init_completion(&sbi
->dfr_complete
);
1833 timeout
= wait_for_completion_timeout(&sbi
->dfr_complete
, timeout
);
1836 /* Force defrag_updat_fat() after timeout. */
1837 dfr_debug("Force sync(), mode %d, left-timeout %ld", head
.mode
, timeout
);
1839 down_read(&sb
->s_umount
);
1844 fsapi_dfr_update_fat_next(sb
);
1846 fsapi_sync_fs(sb
, 1);
1848 #ifdef CONFIG_SDFAT_DFR_DEBUG
1850 fsapi_dfr_spo_test(sb
, DFR_SPO_FAT_NEXT
, __func__
);
1853 fsapi_dfr_update_fat_prev(sb
, 1);
1854 fsapi_sync_fs(sb
, 1);
1858 up_read(&sb
->s_umount
);
1861 #ifdef CONFIG_SDFAT_DFR_DEBUG
1863 fsapi_dfr_spo_test(sb
, DFR_SPO_NORMAL
, __func__
);
1867 /* Send DISCARD to clean-ed AUs */
1868 fsapi_dfr_check_discard(sb
);
1870 #ifdef CONFIG_SDFAT_DFR_DEBUG
1872 fsapi_dfr_spo_test(sb
, DFR_SPO_DISCARD
, __func__
);
1875 /* Unmark IGNORE flag to all victim AUs */
1876 fsapi_dfr_unmark_ignore_all(sb
);
1879 err
= copy_to_user(uarg
, sb_dfr
->chunks
, sizeof(struct defrag_chunk_info
) * len
);
1883 /* Clean-up sb_dfr & ino_dfr */
1884 defrag_cleanup_reqs(sb
, err
);
1887 free_pages_exact(chunks
, len
* sizeof(struct defrag_chunk_info
));
1889 /* Set sb_dfr's state as IDLE */
1890 atomic_set(&sb_dfr
->stat
, DFR_SB_STAT_IDLE
);
1892 dfr_debug("IOC_DFR_REQ done (err %d)", err
);
1897 * @fn sdfat_ioctl_defrag_trav
1898 * @brief ioctl to traverse given directory for defrag
1899 * @return 0 on success, -errno otherwise
1900 * @param inode inode
1901 * @param uarg output buffer
1904 sdfat_ioctl_defrag_trav(
1905 IN
struct inode
*inode
,
1906 INOUT
unsigned int *uarg
)
1908 struct super_block
*sb
= inode
->i_sb
;
1909 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
1910 struct defrag_info
*sb_dfr
= &(sbi
->dfr_info
);
1911 struct defrag_trav_arg
*args
= (struct defrag_trav_arg
*) sbi
->dfr_pagep
;
1912 struct defrag_trav_header
*header
= (struct defrag_trav_header
*) args
;
1915 /* Check overlapped defrag */
1916 if (atomic_cmpxchg(&sb_dfr
->stat
, DFR_SB_STAT_IDLE
, DFR_SB_STAT_REQ
)) {
1917 dfr_debug("sb_dfr->stat %d", atomic_read(&sb_dfr
->stat
));
1921 /* Check if defrag required */
1923 if (!fsapi_dfr_check_dfr_required(sb
, NULL
, NULL
, NULL
)) {
1924 dfr_debug("Not enough space left for defrag (err %d)", -ENOSPC
);
1925 atomic_set(&sb_dfr
->stat
, DFR_SB_STAT_IDLE
);
1932 err
= copy_from_user(args
, uarg
, PAGE_SIZE
);
1937 * ROOT directory has i_pos = 0 and start_clus = 0 .
1939 if (!(header
->type
& DFR_TRAV_TYPE_HEADER
)) {
1941 dfr_debug("type %d, i_pos %08llx, start_clus %08x",
1942 header
->type
, header
->i_pos
, header
->start_clus
);
1946 /* If FS busy, cancel defrag */
1947 if (!(header
->type
& DFR_TRAV_TYPE_TEST
)) {
1948 unsigned int reserved_clus
= 0, queued_pages
= 0;
1950 err
= defrag_check_fs_busy(sb
, &reserved_clus
, &queued_pages
);
1952 dfr_debug("FS busy, cancel defrag (reserved_clus %d, queued_pages %d)",
1953 reserved_clus
, queued_pages
);
1959 /* Scan given directory and gather info */
1962 err
= fsapi_dfr_scan_dir(sb
, (void *)args
);
1964 inode_unlock(inode
);
1967 /* Copy the result to user */
1968 err
= copy_to_user(uarg
, args
, PAGE_SIZE
);
1972 memset(sbi
->dfr_pagep
, 0, PAGE_SIZE
);
1974 atomic_set(&sb_dfr
->stat
, DFR_SB_STAT_IDLE
);
1979 * @fn sdfat_ioctl_defrag_info
1980 * @brief ioctl to get HW param info
1981 * @return 0 on success, -errno otherwise
1982 * @param sb super block
1983 * @param uarg output buffer
1986 sdfat_ioctl_defrag_info(
1987 IN
struct super_block
*sb
,
1988 OUT
unsigned int *uarg
)
1990 struct defrag_info_arg info_arg
;
1993 memset(&info_arg
, 0, sizeof(struct defrag_info_arg
));
1996 err
= fsapi_dfr_get_info(sb
, &info_arg
);
1999 dfr_debug("IOC_DFR_INFO: sec_per_au %d, hidden_sectors %d",
2000 info_arg
.sec_per_au
, info_arg
.hidden_sectors
);
2002 err
= copy_to_user(uarg
, &info_arg
, sizeof(struct defrag_info_arg
));
2007 #endif /* CONFIG_SDFAT_DFR */
/*
 * Thin defrag wrappers that compile away when CONFIG_SDFAT_DFR is
 * disabled.  (The #else/#endif fallback lines are not visible in this
 * extraction.)
 */
/* Map a file-relative cluster offset through the defrag engine. */
2009 static inline int __do_dfr_map_cluster(struct inode
*inode
, u32 clu_offset
, unsigned int *clus_ptr
)
2011 #ifdef CONFIG_SDFAT_DFR
2012 return fsapi_dfr_map_clus(inode
, clu_offset
, clus_ptr
);
/* Query (cancel flag = 0) whether defrag is active on [start, end]. */
2018 static inline int __check_dfr_on(struct inode
*inode
, loff_t start
, loff_t end
, const char *fname
)
2020 #ifdef CONFIG_SDFAT_DFR
2021 struct defrag_info
*ino_dfr
= &(SDFAT_I(inode
)->dfr_info
);
2023 if ((atomic_read(&ino_dfr
->stat
) == DFR_INO_STAT_REQ
) &&
2024 fsapi_dfr_check_dfr_on(inode
, start
, end
, 0, fname
))
/* Same check but with cancel flag = 1: abort pending defrag work. */
2030 static inline int __cancel_dfr_work(struct inode
*inode
, loff_t start
, loff_t end
, const char *fname
)
2032 #ifdef CONFIG_SDFAT_DFR
2033 struct defrag_info
*ino_dfr
= &(SDFAT_I(inode
)->dfr_info
);
2035 if (atomic_read(&ino_dfr
->stat
) == DFR_INO_STAT_REQ
)
2036 fsapi_dfr_check_dfr_on(inode
, start
, end
, 1, fname
);
/* Notify the defrag engine from writepage end_io while a request is live. */
2041 static inline int __dfr_writepage_end_io(struct page
*page
)
2043 #ifdef CONFIG_SDFAT_DFR
2044 struct defrag_info
*ino_dfr
= &(SDFAT_I(page
->mapping
->host
)->dfr_info
);
2046 if (atomic_read(&ino_dfr
->stat
) == DFR_INO_STAT_REQ
)
2047 fsapi_dfr_writepage_endio(page
);
2052 static inline void __init_dfr_info(struct inode
*inode
)
2054 #ifdef CONFIG_SDFAT_DFR
2055 memset(&(SDFAT_I(inode
)->dfr_info
), 0, sizeof(struct defrag_info
));
2056 INIT_LIST_HEAD(&(SDFAT_I(inode
)->dfr_info
.entry
));
2057 mutex_init(&(SDFAT_I(inode
)->dfr_info
.lock
));
2061 static inline int __alloc_dfr_mem_if_required(struct super_block
*sb
)
2063 #ifdef CONFIG_SDFAT_DFR
2064 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
2066 if (!sbi
->options
.defrag
)
2069 memset(&sbi
->dfr_info
, 0, sizeof(struct defrag_info
));
2070 INIT_LIST_HEAD(&(sbi
->dfr_info
.entry
));
2071 mutex_init(&(sbi
->dfr_info
.lock
));
2073 sbi
->dfr_new_clus
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
2074 if (!sbi
->dfr_new_clus
) {
2075 dfr_debug("error %d", -ENOMEM
);
2078 sbi
->dfr_new_idx
= 1;
2080 sbi
->dfr_page_wb
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
2081 if (!sbi
->dfr_page_wb
) {
2082 dfr_debug("error %d", -ENOMEM
);
2086 sbi
->dfr_pagep
= alloc_pages_exact(sizeof(struct page
*) *
2087 PAGES_PER_AU(sb
), GFP_KERNEL
| __GFP_ZERO
);
2088 if (!sbi
->dfr_pagep
) {
2089 dfr_debug("error %d", -ENOMEM
);
2096 static void __free_dfr_mem_if_required(struct super_block
*sb
)
2098 #ifdef CONFIG_SDFAT_DFR
2099 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
2101 if (sbi
->dfr_pagep
) {
2102 free_pages_exact(sbi
->dfr_pagep
, sizeof(struct page
*) * PAGES_PER_AU(sb
));
2103 sbi
->dfr_pagep
= NULL
;
2106 /* thanks for kfree */
2107 kfree(sbi
->dfr_page_wb
);
2108 sbi
->dfr_page_wb
= NULL
;
2110 kfree(sbi
->dfr_new_clus
);
2111 sbi
->dfr_new_clus
= NULL
;
/*
 * ->mmap(): cancel any in-flight defrag work on the inode before
 * falling through to generic_file_mmap().
 * NOTE(review): vm_start/vm_end are process virtual addresses, not
 * file offsets, yet they are cast to loff_t for the range — confirm
 * __cancel_dfr_work really expects these values.
 */
2116 static int sdfat_file_mmap(struct file
*file
, struct vm_area_struct
*vm_struct
)
2118 __cancel_dfr_work(file
->f_mapping
->host
,
2119 (loff_t
)vm_struct
->vm_start
,
2120 (loff_t
)(vm_struct
->vm_end
- 1),
2123 return generic_file_mmap(file
, vm_struct
);
2126 static int sdfat_ioctl_volume_id(struct inode
*dir
)
2128 struct sdfat_sb_info
*sbi
= SDFAT_SB(dir
->i_sb
);
2129 FS_INFO_T
*fsi
= &(sbi
->fsi
);
2134 static int sdfat_dfr_ioctl(struct inode
*inode
, struct file
*filp
,
2135 unsigned int cmd
, unsigned long arg
)
2137 #ifdef CONFIG_SDFAT_DFR
2139 case SDFAT_IOCTL_DFR_INFO
: {
2140 struct super_block
*sb
= inode
->i_sb
;
2141 FS_INFO_T
*fsi
= &SDFAT_SB(sb
)->fsi
;
2142 unsigned int __user
*uarg
= (unsigned int __user
*) arg
;
2145 /* Check FS type (FAT32 only) */
2146 if (fsi
->vol_type
!= FAT32
) {
2147 dfr_err("Defrag not supported, vol_type %d", fsi
->vol_type
);
2153 /* Check if SB's defrag option enabled */
2154 if (!(SDFAT_SB(sb
)->options
.defrag
)) {
2155 dfr_err("Defrag not supported, sbi->options.defrag %d", SDFAT_SB(sb
)->options
.defrag
);
2160 /* Only IOCTL on mount-point allowed */
2161 if (filp
->f_path
.mnt
->mnt_root
!= filp
->f_path
.dentry
) {
2162 dfr_err("IOC_DFR_INFO only allowed on ROOT, root %p, dentry %p",
2163 filp
->f_path
.mnt
->mnt_root
, filp
->f_path
.dentry
);
2169 return sdfat_ioctl_defrag_info(sb
, uarg
);
2171 case SDFAT_IOCTL_DFR_TRAV
: {
2172 struct super_block
*sb
= inode
->i_sb
;
2173 FS_INFO_T
*fsi
= &SDFAT_SB(sb
)->fsi
;
2174 unsigned int __user
*uarg
= (unsigned int __user
*) arg
;
2177 /* Check FS type (FAT32 only) */
2178 if (fsi
->vol_type
!= FAT32
) {
2179 dfr_err("Defrag not supported, vol_type %d", fsi
->vol_type
);
2185 /* Check if SB's defrag option enabled */
2186 if (!(SDFAT_SB(sb
)->options
.defrag
)) {
2187 dfr_err("Defrag not supported, sbi->options.defrag %d", SDFAT_SB(sb
)->options
.defrag
);
2193 return sdfat_ioctl_defrag_trav(inode
, uarg
);
2195 case SDFAT_IOCTL_DFR_REQ
: {
2196 struct super_block
*sb
= inode
->i_sb
;
2197 FS_INFO_T
*fsi
= &SDFAT_SB(sb
)->fsi
;
2198 unsigned int __user
*uarg
= (unsigned int __user
*) arg
;
2202 /* Check if FS_ERROR occurred */
2203 if (sb_rdonly(sb
)) {
2204 dfr_err("RDONLY partition (err %d)", -EPERM
);
2209 /* Check FS type (FAT32 only) */
2210 if (fsi
->vol_type
!= FAT32
) {
2211 dfr_err("Defrag not supported, vol_type %d", fsi
->vol_type
);
2217 /* Check if SB's defrag option enabled */
2218 if (!(SDFAT_SB(sb
)->options
.defrag
)) {
2219 dfr_err("Defrag not supported, sbi->options.defrag %d", SDFAT_SB(sb
)->options
.defrag
);
2224 /* Only IOCTL on mount-point allowed */
2225 if (filp
->f_path
.mnt
->mnt_root
!= filp
->f_path
.dentry
) {
2226 dfr_err("IOC_DFR_INFO only allowed on ROOT, root %p, dentry %p",
2227 filp
->f_path
.mnt
->mnt_root
, filp
->f_path
.dentry
);
2233 return sdfat_ioctl_defrag_req(inode
, uarg
);
2235 #ifdef CONFIG_SDFAT_DFR_DEBUG
2236 case SDFAT_IOCTL_DFR_SPO_FLAG
: {
2237 struct sdfat_sb_info
*sbi
= SDFAT_SB(inode
->i_sb
);
2240 ret
= get_user(sbi
->dfr_spo_flag
, (int __user
*)arg
);
2241 dfr_debug("dfr_spo_flag %d", sbi
->dfr_spo_flag
);
2245 #endif /* CONFIG_SDFAT_DFR_DEBUG */
2247 #endif /* CONFIG_SDFAT_DFR */
2249 /* Inappropriate ioctl for device */
2253 static int sdfat_dbg_ioctl(struct inode
*inode
, struct file
*filp
,
2254 unsigned int cmd
, unsigned long arg
)
2256 #ifdef CONFIG_SDFAT_DBG_IOCTL
2257 struct super_block
*sb
= inode
->i_sb
;
2258 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
2262 case SDFAT_IOC_GET_DEBUGFLAGS
:
2263 flags
= sbi
->debug_flags
;
2264 return put_user(flags
, (int __user
*)arg
);
2265 case SDFAT_IOC_SET_DEBUGFLAGS
:
2267 if (!capable(CAP_SYS_ADMIN
))
2270 if (get_user(flags
, (int __user
*) arg
))
2274 sbi
->debug_flags
= flags
;
2277 case SDFAT_IOCTL_PANIC
:
2278 panic("ioctl panic for test");
2280 /* COULD NOT REACH HEAR */
2283 #endif /* CONFIG_SDFAT_DBG_IOCTL */
/*
 * Top-level unlocked_ioctl dispatcher: volume-id query first, then the
 * defrag ioctls, finally the debug ioctls (which report unknown
 * commands via -ENOTTY).
 */
2287 static long sdfat_generic_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
2289 struct inode
*inode
= file_inode(filp
);
2292 if (cmd
== SDFAT_IOCTL_GET_VOLUME_ID
)
2293 return sdfat_ioctl_volume_id(inode
);
/* Defrag ioctls (SDFAT_IOCTL_DFR_*). */
2295 err
= sdfat_dfr_ioctl(inode
, filp
, cmd
, arg
);
2299 /* -ENOTTY if inappropriate ioctl for device */
2300 return sdfat_dbg_ioctl(inode
, filp
, cmd
, arg
);
/*
 * Common getattr body: fill generic inode attributes, then advertise
 * the FAT cluster size as the preferred I/O blocksize (st_blksize).
 */
2303 static int __sdfat_getattr(struct inode
*inode
, struct kstat
*stat
)
2305 TMSG("%s entered\n", __func__
);
2307 generic_fillattr(inode
, stat
);
/* Report cluster size rather than the device block size. */
2308 stat
->blksize
= SDFAT_SB(inode
->i_sb
)->fsi
.cluster_size
;
2310 TMSG("%s exited\n", __func__
);
2314 static void __sdfat_writepage_end_io(struct bio
*bio
, int err
)
2316 struct page
*page
= bio
->bi_io_vec
->bv_page
;
2317 struct super_block
*sb
= page
->mapping
->host
->i_sb
;
2319 ASSERT(bio
->bi_vcnt
== 1); /* Single page endio */
2320 ASSERT(bio_data_dir(bio
)); /* Write */
2324 mapping_set_error(page
->mapping
, err
);
2327 __dfr_writepage_end_io(page
);
2329 #ifdef CONFIG_SDFAT_TRACE_IO
2331 //struct sdfat_sb_info *sbi = SDFAT_SB(bio->bi_bdev->bd_super);
2332 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
/* Per-superblock write statistics (CONFIG_SDFAT_TRACE_IO). */
2334 sbi
->stat_n_pages_written
++;
/* Count pages written directly against the block-device inode. */
2335 if (page
->mapping
->host
== sb
->s_bdev
->bd_inode
)
2336 sbi
->stat_n_bdev_pages_written
++;
2338 /* 4 MB = 1024 pages => 0.4 sec (approx.)
2339 * 32 KB = 64 pages => 0.025 sec
2340 * Min. average latency b/w msgs. ~= 0.025 sec
/* Emit a stats line every 64 written pages. */
2342 if ((sbi
->stat_n_pages_written
& 63) == 0) {
2343 DMSG("STAT:%u, %u, %u, %u (Sector #: %u)\n",
2344 sbi
->stat_n_pages_added
, sbi
->stat_n_pages_written
,
/*
 * NOTE(review): "stat_n_bdev_pages_witten" (missing 'r') here vs
 * "stat_n_bdev_pages_written" incremented above — verify which
 * spelling struct sdfat_sb_info actually declares; one of the two
 * references looks like a typo.
 */
2345 sbi
->stat_n_bdev_pages_witten
,
2346 sbi
->stat_n_pages_confused
,
2347 (unsigned int)__sdfat_bio_sector(bio
));
2351 end_page_writeback(page
);
2354 // Update trace info.
2355 atomic_dec(&SDFAT_SB(sb
)->stat_n_pages_queued
);
/*
 * Whether ->write_inode() should be synchronous for this volume.
 * With DIR_SYNC + DELAYED_META_DIRTY configured, the vol_type check
 * distinguishes exFAT from FAT12/16/32 volumes.
 */
2359 static int __support_write_inode_sync(struct super_block
*sb
)
2361 #ifdef CONFIG_SDFAT_SUPPORT_DIR_SYNC
2362 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
2363 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
2365 if (sbi
->fsi
.vol_type
!= EXFAT
)
/*
 * fsync backend: run the generic file fsync, then force a full
 * filesystem sync (fsapi_sync_fs) when synchronous inode writes are
 * not supported, so metadata still reaches the medium.  The generic
 * fsync result takes precedence over the sync-fs error.
 */
2374 static int __sdfat_file_fsync(struct file
*filp
, loff_t start
, loff_t end
, int datasync
)
2376 struct inode
*inode
= filp
->f_mapping
->host
;
2377 struct super_block
*sb
= inode
->i_sb
;
2380 res
= __sdfat_generic_file_fsync(filp
, start
, end
, datasync
);
2382 if (!__support_write_inode_sync(sb
))
2383 err
= fsapi_sync_fs(sb
, 1);
2385 return res
? res
: err
;
/*
 * Directory file operations.  ->iterate is used on kernels >= 3.14,
 * the older ->readdir callback otherwise.
 */
2389 static const struct file_operations sdfat_dir_operations
= {
2390 .llseek
= generic_file_llseek
,
2391 .read
= generic_read_dir
,
2392 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
2393 .iterate
= sdfat_iterate
,
2394 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
2395 .readdir
= sdfat_readdir
,
2397 .fsync
= sdfat_file_fsync
,
2398 .unlocked_ioctl
= sdfat_generic_ioctl
,
2401 static int __sdfat_create(struct inode
*dir
, struct dentry
*dentry
)
2403 struct super_block
*sb
= dir
->i_sb
;
2404 struct inode
*inode
;
2405 sdfat_timespec_t ts
;
2412 TMSG("%s entered\n", __func__
);
2414 ts
= current_time(dir
);
2416 err
= fsapi_create(dir
, (u8
*) dentry
->d_name
.name
, FM_REGULAR
, &fid
);
2420 __lock_d_revalidate(dentry
);
2422 inode_inc_iversion(dir
);
2423 dir
->i_ctime
= dir
->i_mtime
= dir
->i_atime
= ts
;
2424 if (IS_DIRSYNC(dir
))
2425 (void) sdfat_sync_inode(dir
);
2427 mark_inode_dirty(dir
);
2429 i_pos
= sdfat_make_i_pos(&fid
);
2430 inode
= sdfat_build_inode(sb
, &fid
, i_pos
);
2431 if (IS_ERR(inode
)) {
2432 err
= PTR_ERR(inode
);
2435 inode_inc_iversion(inode
);
2436 inode
->i_mtime
= inode
->i_atime
= inode
->i_ctime
= ts
;
2437 /* timestamp is already written, so mark_inode_dirty() is unneeded. */
2439 d_instantiate(dentry
, inode
);
2441 __unlock_d_revalidate(dentry
);
2443 TMSG("%s exited with err(%d)\n", __func__
, err
);
2445 sdfat_statistics_set_create(fid
.flags
);
/*
 * Look up @qname in directory @dir via fsapi_lookup(), filling @fid
 * with the on-disk location.  An empty name is rejected up front.
 */
2450 static int sdfat_find(struct inode
*dir
, struct qstr
*qname
, FILE_ID_T
*fid
)
2454 if (qname
->len
== 0)
2457 err
= fsapi_lookup(dir
, (u8
*) qname
->name
, fid
);
2464 static int sdfat_d_anon_disconn(struct dentry
*dentry
)
2466 return IS_ROOT(dentry
) && (dentry
->d_flags
& DCACHE_DISCONNECTED
);
/*
 * __sdfat_lookup(): core of the VFS ->lookup() operation.
 * Finds the name via sdfat_find(), builds the inode, preloads a
 * symlink's target buffer, then either reuses an existing dcache
 * alias (d_find_alias()/d_move()) when it belongs to the same parent
 * and is not an anonymous disconnected entry, or splices a new one
 * with d_splice_alias().  dentry->d_time is seeded from the directory
 * iversion for later d_revalidate checks.
 * NOTE(review): lossy extraction -- error-path lines/braces are
 * missing; visible code tokens are kept verbatim.
 */
2469 static struct dentry
*__sdfat_lookup(struct inode
*dir
, struct dentry
*dentry
)
2471 struct super_block
*sb
= dir
->i_sb
;
2472 struct inode
*inode
;
2473 struct dentry
*alias
;
2481 TMSG("%s entered\n", __func__
);
2482 err
= sdfat_find(dir
, &dentry
->d_name
, &fid
);
2484 if (err
== -ENOENT
) {
2491 i_pos
= sdfat_make_i_pos(&fid
);
2492 inode
= sdfat_build_inode(sb
, &fid
, i_pos
);
2493 if (IS_ERR(inode
)) {
2494 err
= PTR_ERR(inode
);
/* For symlinks, read the link target into a NUL-terminated kmalloc buffer. */
2498 i_mode
= inode
->i_mode
;
2499 if (S_ISLNK(i_mode
) && !SDFAT_I(inode
)->target
) {
2500 SDFAT_I(inode
)->target
= kmalloc((i_size_read(inode
)+1), GFP_KERNEL
);
2501 if (!SDFAT_I(inode
)->target
) {
2505 fsapi_read_link(dir
, &fid
, SDFAT_I(inode
)->target
, i_size_read(inode
), &ret
);
2506 *(SDFAT_I(inode
)->target
+ i_size_read(inode
)) = '\0';
2509 alias
= d_find_alias(inode
);
2512 * Checking "alias->d_parent == dentry->d_parent" to make sure
2513 * FS is not corrupted (especially double linked dir).
2515 if (alias
&& alias
->d_parent
== dentry
->d_parent
&&
2516 !sdfat_d_anon_disconn(alias
)) {
2519 * Unhashed alias is able to exist because of revalidate()
2520 * called by lookup_fast. You can easily make this status
2521 * by calling create and lookup concurrently
2522 * In such case, we reuse an alias instead of new dentry
2524 if (d_unhashed(alias
)) {
2525 BUG_ON(alias
->d_name
.hash_len
!= dentry
->d_name
.hash_len
);
2526 sdfat_msg(sb
, KERN_INFO
, "rehashed a dentry(%p) "
2527 "in read lookup", alias
);
2530 } else if (!S_ISDIR(i_mode
)) {
2532 * This inode has non anonymous-DCACHE_DISCONNECTED
2533 * dentry. This means, the user did ->lookup() by an
2534 * another name (longname vs 8.3 alias of it) in past.
2536 * Switch to new one for reason of locality if possible.
2538 d_move(alias
, dentry
);
2542 TMSG("%s exited\n", __func__
);
2547 /* initialize d_time even though it is positive dentry */
2548 dentry
->d_time
= (unsigned long)inode_peek_iversion(dir
);
2551 dentry
= d_splice_alias(inode
, dentry
);
2553 TMSG("%s exited\n", __func__
);
2557 TMSG("%s exited with err(%d)\n", __func__
, err
);
2558 return ERR_PTR(err
);
/*
 * sdfat_unlink(): VFS ->unlink().  Removes the entry through
 * fsapi_unlink(), then (with d_revalidate blocked for @dentry)
 * bumps the directory iversion, stamps dir/inode times, detaches
 * the inode from the hash and refreshes dentry->d_time.
 * NOTE(review): lossy extraction -- declarations/returns are
 * missing; visible tokens are kept verbatim.
 */
2562 static int sdfat_unlink(struct inode
*dir
, struct dentry
*dentry
)
2564 struct inode
*inode
= dentry
->d_inode
;
2565 struct super_block
*sb
= dir
->i_sb
;
2566 sdfat_timespec_t ts
;
2571 TMSG("%s entered\n", __func__
);
2573 ts
= current_time(dir
);
2575 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
/* cancel any in-flight defragmentation work over the whole file */
2577 __cancel_dfr_work(inode
, 0, SDFAT_I(inode
)->fid
.size
, __func__
);
2579 err
= fsapi_unlink(dir
, &(SDFAT_I(inode
)->fid
));
2583 __lock_d_revalidate(dentry
);
2585 inode_inc_iversion(dir
);
2586 dir
->i_mtime
= dir
->i_atime
= ts
;
2587 if (IS_DIRSYNC(dir
))
2588 (void) sdfat_sync_inode(dir
);
2590 mark_inode_dirty(dir
);
2593 inode
->i_mtime
= inode
->i_atime
= ts
;
2594 sdfat_detach(inode
);
2595 dentry
->d_time
= (unsigned long)inode_peek_iversion(dir
);
2597 __unlock_d_revalidate(dentry
);
2599 TMSG("%s exited with err(%d)\n", __func__
, err
);
/*
 * sdfat_symlink(): VFS ->symlink().  Only allowed when the "symlink"
 * mount option is set.  Creates an FM_SYMLINK entry, writes the
 * target string through fsapi_write_link() (removing the entry again
 * on failure), builds the inode, caches the target in a kmalloc'd
 * NUL-terminated buffer and instantiates the dentry.
 * NOTE(review): lossy extraction -- error paths are missing; visible
 * tokens are kept verbatim.
 */
2603 static int sdfat_symlink(struct inode
*dir
, struct dentry
*dentry
, const char *target
)
2605 struct super_block
*sb
= dir
->i_sb
;
2606 struct inode
*inode
;
2607 sdfat_timespec_t ts
;
2611 u64 len
= (u64
) strlen(target
);
2614 /* symlink option check */
2615 if (!SDFAT_SB(sb
)->options
.symlink
)
2620 TMSG("%s entered\n", __func__
);
2622 ts
= current_time(dir
);
2624 err
= fsapi_create(dir
, (u8
*) dentry
->d_name
.name
, FM_SYMLINK
, &fid
);
2628 err
= fsapi_write_link(dir
, &fid
, (char *) target
, len
, &ret
);
/* writing the target failed: undo the just-created entry */
2631 fsapi_remove(dir
, &fid
);
2635 __lock_d_revalidate(dentry
);
2637 inode_inc_iversion(dir
);
2638 dir
->i_ctime
= dir
->i_mtime
= dir
->i_atime
= ts
;
2639 if (IS_DIRSYNC(dir
))
2640 (void) sdfat_sync_inode(dir
);
2642 mark_inode_dirty(dir
);
2644 i_pos
= sdfat_make_i_pos(&fid
);
2645 inode
= sdfat_build_inode(sb
, &fid
, i_pos
);
2646 if (IS_ERR(inode
)) {
2647 err
= PTR_ERR(inode
);
2650 inode_inc_iversion(inode
);
2651 inode
->i_mtime
= inode
->i_atime
= inode
->i_ctime
= ts
;
2652 /* timestamp is already written, so mark_inode_dirty() is unneeded. */
2654 SDFAT_I(inode
)->target
= kmalloc((len
+1), GFP_KERNEL
);
2655 if (!SDFAT_I(inode
)->target
) {
2659 memcpy(SDFAT_I(inode
)->target
, target
, len
+1);
2661 d_instantiate(dentry
, inode
);
2663 __unlock_d_revalidate(dentry
);
2665 TMSG("%s exited with err(%d)\n", __func__
, err
);
/*
 * __sdfat_mkdir(): core of VFS ->mkdir().  Creates the directory via
 * fsapi_mkdir(), updates parent iversion/timestamps (syncing when
 * DIRSYNC), builds and instantiates the new inode, and records
 * allocation-flag statistics.
 * NOTE(review): lossy extraction -- error paths are missing; visible
 * tokens are kept verbatim.
 */
2670 static int __sdfat_mkdir(struct inode
*dir
, struct dentry
*dentry
)
2672 struct super_block
*sb
= dir
->i_sb
;
2673 struct inode
*inode
;
2674 sdfat_timespec_t ts
;
2681 TMSG("%s entered\n", __func__
);
2683 ts
= current_time(dir
);
2685 err
= fsapi_mkdir(dir
, (u8
*) dentry
->d_name
.name
, &fid
);
2689 __lock_d_revalidate(dentry
);
2691 inode_inc_iversion(dir
);
2692 dir
->i_ctime
= dir
->i_mtime
= dir
->i_atime
= ts
;
2693 if (IS_DIRSYNC(dir
))
2694 (void) sdfat_sync_inode(dir
);
2696 mark_inode_dirty(dir
);
2699 i_pos
= sdfat_make_i_pos(&fid
);
2700 inode
= sdfat_build_inode(sb
, &fid
, i_pos
);
2701 if (IS_ERR(inode
)) {
2702 err
= PTR_ERR(inode
);
2705 inode_inc_iversion(inode
);
2706 inode
->i_mtime
= inode
->i_atime
= inode
->i_ctime
= ts
;
2707 /* timestamp is already written, so mark_inode_dirty() is unneeded. */
2709 d_instantiate(dentry
, inode
);
2712 __unlock_d_revalidate(dentry
);
2714 TMSG("%s exited with err(%d)\n", __func__
, err
);
2716 sdfat_statistics_set_mkdir(fid
.flags
);
/*
 * sdfat_rmdir(): VFS ->rmdir().  Removes the directory through
 * fsapi_rmdir(), then updates the parent's iversion/timestamps,
 * stamps and detaches the victim inode, and refreshes
 * dentry->d_time -- all with d_revalidate blocked.
 * NOTE(review): lossy extraction -- returns/error paths are missing;
 * visible tokens are kept verbatim.
 */
2721 static int sdfat_rmdir(struct inode
*dir
, struct dentry
*dentry
)
2723 struct inode
*inode
= dentry
->d_inode
;
2724 struct super_block
*sb
= dir
->i_sb
;
2725 sdfat_timespec_t ts
;
2730 TMSG("%s entered\n", __func__
);
2732 ts
= current_time(dir
);
2734 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
2736 err
= fsapi_rmdir(dir
, &(SDFAT_I(inode
)->fid
));
2740 __lock_d_revalidate(dentry
);
2742 inode_inc_iversion(dir
);
2743 dir
->i_mtime
= dir
->i_atime
= ts
;
2744 if (IS_DIRSYNC(dir
))
2745 (void) sdfat_sync_inode(dir
);
2747 mark_inode_dirty(dir
);
2751 inode
->i_mtime
= inode
->i_atime
= ts
;
2752 sdfat_detach(inode
);
2753 dentry
->d_time
= (unsigned long)inode_peek_iversion(dir
);
2755 __unlock_d_revalidate(dentry
);
2757 TMSG("%s exited with err(%d)\n", __func__
, err
);
/*
 * __sdfat_rename(): core of VFS ->rename().  Renames via
 * fsapi_rename(), re-attaches the moved inode under its new on-disk
 * position (detach/attach with the recomputed i_pos), updates both
 * directories' iversion/timestamps, adjusts nlink when a directory
 * moves between parents, and drops/detaches a replaced target inode.
 * Both dentries are shielded from d_revalidate while metadata is in
 * flux.
 * NOTE(review): lossy extraction -- branch braces and error paths are
 * missing; visible tokens are kept verbatim.
 */
2761 static int __sdfat_rename(struct inode
*old_dir
, struct dentry
*old_dentry
,
2762 struct inode
*new_dir
, struct dentry
*new_dentry
)
2764 struct inode
*old_inode
, *new_inode
;
2765 struct super_block
*sb
= old_dir
->i_sb
;
2766 sdfat_timespec_t ts
;
2772 TMSG("%s entered\n", __func__
);
2774 old_inode
= old_dentry
->d_inode
;
2775 new_inode
= new_dentry
->d_inode
;
2777 ts
= current_time(old_inode
);
2779 SDFAT_I(old_inode
)->fid
.size
= i_size_read(old_inode
);
2781 __cancel_dfr_work(old_inode
, 0, 1, __func__
);
2783 err
= fsapi_rename(old_dir
, &(SDFAT_I(old_inode
)->fid
), new_dir
, new_dentry
);
2787 __lock_d_revalidate(old_dentry
);
2788 __lock_d_revalidate(new_dentry
);
2790 inode_inc_iversion(new_dir
);
2791 new_dir
->i_ctime
= new_dir
->i_mtime
= new_dir
->i_atime
= ts
;
2792 if (IS_DIRSYNC(new_dir
))
2793 (void) sdfat_sync_inode(new_dir
);
2795 mark_inode_dirty(new_dir
);
/* the inode's on-disk position changed: rehash it under the new i_pos */
2797 i_pos
= sdfat_make_i_pos(&(SDFAT_I(old_inode
)->fid
));
2798 sdfat_detach(old_inode
);
2799 sdfat_attach(old_inode
, i_pos
);
2800 if (IS_DIRSYNC(new_dir
))
2801 (void) sdfat_sync_inode(old_inode
);
2803 mark_inode_dirty(old_inode
);
2805 if ((S_ISDIR(old_inode
->i_mode
)) && (old_dir
!= new_dir
)) {
2806 drop_nlink(old_dir
);
2811 inode_inc_iversion(old_dir
);
2812 old_dir
->i_ctime
= old_dir
->i_mtime
= ts
;
2813 if (IS_DIRSYNC(old_dir
))
2814 (void) sdfat_sync_inode(old_dir
);
2816 mark_inode_dirty(old_dir
);
2819 sdfat_detach(new_inode
);
2821 /* skip drop_nlink if new_inode already has been dropped */
2822 if (new_inode
->i_nlink
) {
2823 drop_nlink(new_inode
);
2824 if (S_ISDIR(new_inode
->i_mode
))
2825 drop_nlink(new_inode
);
2827 EMSG("%s : abnormal access to an inode dropped\n",
2829 WARN_ON(new_inode
->i_nlink
== 0);
2831 new_inode
->i_ctime
= ts
;
2833 (void) sdfat_sync_inode(new_inode
);
2838 __unlock_d_revalidate(old_dentry
);
2839 __unlock_d_revalidate(new_dentry
);
2841 TMSG("%s exited with err(%d)\n", __func__
, err
);
/*
 * sdfat_cont_expand(): grow @inode to @size via
 * generic_cont_expand_simple(), stamp c/mtime and, for IS_SYNC
 * inodes, synchronously write out the newly extended range
 * (data, mapping buffers, then the inode itself) and wait for it.
 * NOTE(review): lossy extraction -- early returns are missing;
 * visible tokens are kept verbatim.
 */
2845 static int sdfat_cont_expand(struct inode
*inode
, loff_t size
)
2847 struct address_space
*mapping
= inode
->i_mapping
;
2848 loff_t start
= i_size_read(inode
), count
= size
- i_size_read(inode
);
2851 err
= generic_cont_expand_simple(inode
, size
);
2855 inode
->i_ctime
= inode
->i_mtime
= current_time(inode
);
2856 mark_inode_dirty(inode
);
2858 if (!IS_SYNC(inode
))
2861 err
= filemap_fdatawrite_range(mapping
, start
, start
+ count
- 1);
2862 err2
= sync_mapping_buffers(mapping
);
/* keep the first error encountered, but run every sync step */
2863 err
= (err
)?(err
):(err2
);
2864 err2
= write_inode_now(inode
, 1);
2865 err
= (err
)?(err
):(err2
);
2869 return filemap_fdatawait_range(mapping
, start
, start
+ count
- 1);
/*
 * sdfat_allow_set_time(): decide whether the current task may set
 * timestamps on @inode under the "allow_utime" mount option.  Checks
 * group membership and the MAY_WRITE bit of allow_utime for
 * non-owners; otherwise the caller falls back to the default VFS
 * permission check.
 * NOTE(review): lossy extraction -- return statements are missing;
 * visible tokens are kept verbatim.
 */
2872 static int sdfat_allow_set_time(struct sdfat_sb_info
*sbi
, struct inode
*inode
)
2874 mode_t allow_utime
= sbi
->options
.allow_utime
;
2876 if (!uid_eq(current_fsuid(), inode
->i_uid
)) {
2877 if (in_group_p(inode
->i_gid
))
2879 if (allow_utime
& MAY_WRITE
)
2883 /* use a default check */
/*
 * sdfat_sanitize_mode(): clamp a chmod() request to what FAT can
 * actually represent.  Using fs_fmask/fs_dmask, the r/x bits must
 * match the current mode, and the w bits may only flip all-or-none
 * (FAT has a single read-only attribute).  On success *mode_ptr is
 * rewritten to the representable permission set.
 * NOTE(review): lossy extraction -- return statements are missing;
 * visible tokens are kept verbatim.
 */
2887 static int sdfat_sanitize_mode(const struct sdfat_sb_info
*sbi
,
2888 struct inode
*inode
, umode_t
*mode_ptr
)
2890 mode_t i_mode
, mask
, perm
;
2892 i_mode
= inode
->i_mode
;
2894 if (S_ISREG(i_mode
) || S_ISLNK(i_mode
))
2895 mask
= sbi
->options
.fs_fmask
;
2897 mask
= sbi
->options
.fs_dmask
;
2899 perm
= *mode_ptr
& ~(S_IFMT
| mask
);
2901 /* Of the r and x bits, all (subject to umask) must be present.*/
2902 if ((perm
& (S_IRUGO
| S_IXUGO
)) != (i_mode
& (S_IRUGO
| S_IXUGO
)))
2905 if (sdfat_mode_can_hold_ro(inode
)) {
2906 /* Of the w bits, either all (subject to umask) or none must be present. */
2907 if ((perm
& S_IWUGO
) && ((perm
& S_IWUGO
) != (S_IWUGO
& ~mask
)))
2910 /* If sdfat_mode_can_hold_ro(inode) is false, can't change w bits. */
2911 if ((perm
& S_IWUGO
) != (S_IWUGO
& ~mask
))
2915 *mode_ptr
&= S_IFMT
| perm
;
2921 * sdfat_block_truncate_page() zeroes out a mapping from file offset `from'
2922 * up to the end of the block which corresponds to `from'.
2923 * This is required during truncate to physically zeroout the tail end
2924 * of that block so it doesn't yield old data if the file is later grown.
2925 * Also, avoid causing failure from fsx for cases of "data past EOF"
2927 static int sdfat_block_truncate_page(struct inode
*inode
, loff_t from
)
2929 return block_truncate_page(inode
->i_mapping
, from
, sdfat_get_block
);
/*
 * sdfat_setattr(): VFS ->setattr().  Expands the file first when the
 * request grows it (then clears ATTR_SIZE), temporarily relaxes
 * *_TIME_SET flags when allow_utime permits it around
 * setattr_prepare(), silently ignores uid/gid/mode changes FAT cannot
 * store, sanitizes mode bits, zeroes the tail block and truncates on
 * shrink, and finally copies the attrs and dirties the inode.
 * NOTE(review): lossy extraction -- error returns are missing;
 * visible tokens are kept verbatim.
 */
2932 static int sdfat_setattr(struct dentry
*dentry
, struct iattr
*attr
)
2935 struct sdfat_sb_info
*sbi
= SDFAT_SB(dentry
->d_sb
);
2936 struct inode
*inode
= dentry
->d_inode
;
2937 unsigned int ia_valid
;
2941 TMSG("%s entered\n", __func__
);
2943 if ((attr
->ia_valid
& ATTR_SIZE
)
2944 && (attr
->ia_size
> i_size_read(inode
))) {
2945 error
= sdfat_cont_expand(inode
, attr
->ia_size
);
2946 if (error
|| attr
->ia_valid
== ATTR_SIZE
)
2948 attr
->ia_valid
&= ~ATTR_SIZE
;
2951 /* Check for setting the inode time. */
2952 ia_valid
= attr
->ia_valid
;
2953 if ((ia_valid
& (ATTR_MTIME_SET
| ATTR_ATIME_SET
| ATTR_TIMES_SET
))
2954 && sdfat_allow_set_time(sbi
, inode
)) {
2955 attr
->ia_valid
&= ~(ATTR_MTIME_SET
| ATTR_ATIME_SET
| ATTR_TIMES_SET
);
2958 error
= setattr_prepare(dentry
, attr
);
2959 attr
->ia_valid
= ia_valid
;
2963 if (((attr
->ia_valid
& ATTR_UID
) &&
2964 (!uid_eq(attr
->ia_uid
, sbi
->options
.fs_uid
))) ||
2965 ((attr
->ia_valid
& ATTR_GID
) &&
2966 (!gid_eq(attr
->ia_gid
, sbi
->options
.fs_gid
))) ||
2967 ((attr
->ia_valid
& ATTR_MODE
) &&
2968 (attr
->ia_mode
& ~(S_IFREG
| S_IFLNK
| S_IFDIR
| S_IRWXUGO
)))) {
2974 * We don't return -EPERM here. Yes, strange, but this is too
2977 if (attr
->ia_valid
& ATTR_MODE
) {
2978 if (sdfat_sanitize_mode(sbi
, inode
, &attr
->ia_mode
) < 0)
2979 attr
->ia_valid
&= ~ATTR_MODE
;
2982 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
2984 /* patch 1.2.0 : fixed the problem of size mismatch. */
2985 if (attr
->ia_valid
& ATTR_SIZE
) {
2986 error
= sdfat_block_truncate_page(inode
, attr
->ia_size
);
2990 old_size
= i_size_read(inode
);
2992 /* TO CHECK evicting directory works correctly */
2993 MMSG("%s: inode(%p) truncate size (%llu->%llu)\n", __func__
,
2994 inode
, (u64
)old_size
, (u64
)attr
->ia_size
);
2995 __sdfat_do_truncate(inode
, old_size
, attr
->ia_size
);
2997 setattr_copy(inode
, attr
);
2998 mark_inode_dirty(inode
);
3000 TMSG("%s exited with err(%d)\n", __func__
, error
);
/*
 * inode_operations for sdFAT directories.  Virtual-xattr handlers are
 * compiled in only under CONFIG_SDFAT_VIRTUAL_XATTR; the per-op
 * set/get/removexattr entries exist only before v4.9 (later kernels
 * route xattrs through sb->s_xattr handlers).
 */
3004 static const struct inode_operations sdfat_dir_inode_operations
= {
3005 .create
= sdfat_create
,
3006 .lookup
= sdfat_lookup
,
3007 .unlink
= sdfat_unlink
,
3008 .symlink
= sdfat_symlink
,
3009 .mkdir
= sdfat_mkdir
,
3010 .rmdir
= sdfat_rmdir
,
3011 .rename
= sdfat_rename
,
3012 .setattr
= sdfat_setattr
,
3013 .getattr
= sdfat_getattr
,
3014 #ifdef CONFIG_SDFAT_VIRTUAL_XATTR
3015 .listxattr
= sdfat_listxattr
,
3016 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
3017 .setxattr
= sdfat_setxattr
,
3018 .getxattr
= sdfat_getxattr
,
3019 .removexattr
= sdfat_removexattr
,
3024 /*======================================================================*/
3025 /* File Operations */
3026 /*======================================================================*/
/*
 * inode_operations for sdFAT symlinks.  Kernel-version selected:
 * generic_readlink (pre-4.10 only), get_link (>= 4.5) vs follow_link
 * (older), plus the optional virtual-xattr handlers as above.
 */
3027 static const struct inode_operations sdfat_symlink_inode_operations
= {
3028 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
3029 .readlink
= generic_readlink
,
3031 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
3032 .get_link
= sdfat_follow_link
,
3033 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) */
3034 .follow_link
= sdfat_follow_link
,
3036 #ifdef CONFIG_SDFAT_VIRTUAL_XATTR
3037 .listxattr
= sdfat_listxattr
,
3038 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
3039 .setxattr
= sdfat_setxattr
,
3040 .getxattr
= sdfat_getxattr
,
3041 .removexattr
= sdfat_removexattr
,
/*
 * sdfat_file_release(): VFS ->release().  Re-syncs fid.size with
 * i_size (debug-asserting they already match) and triggers an async
 * filesystem sync so metadata hits the media soon after close.
 */
3046 static int sdfat_file_release(struct inode
*inode
, struct file
*filp
)
3048 struct super_block
*sb
= inode
->i_sb
;
3050 /* Moved below code from sdfat_write_inode
3051 * TO FIX size-mismatch problem.
3053 /* FIXME : Added bug_on to confirm that there is no size mismatch */
3054 sdfat_debug_bug_on(SDFAT_I(inode
)->fid
.size
!= i_size_read(inode
));
3055 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
3056 fsapi_sync_fs(sb
, 0);
/*
 * file_operations for sdFAT regular files.  Read/write entry points
 * are selected per kernel version: pure iter ops (>= 4.1), the
 * new_sync_* shims (3.16..4.0), or the classic do_sync_* + aio pair
 * (older).  Everything else uses sdFAT or generic helpers.
 */
3060 static const struct file_operations sdfat_file_operations
= {
3061 .llseek
= generic_file_llseek
,
3062 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
3063 .read_iter
= generic_file_read_iter
,
3064 .write_iter
= generic_file_write_iter
,
3065 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
3066 .read
= new_sync_read
,
3067 .write
= new_sync_write
,
3068 .read_iter
= generic_file_read_iter
,
3069 .write_iter
= generic_file_write_iter
,
3070 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) */
3071 .read
= do_sync_read
,
3072 .write
= do_sync_write
,
3073 .aio_read
= generic_file_aio_read
,
3074 .aio_write
= generic_file_aio_write
,
3076 .mmap
= sdfat_file_mmap
,
3077 .release
= sdfat_file_release
,
3078 .unlocked_ioctl
= sdfat_generic_ioctl
,
3079 .fsync
= sdfat_file_fsync
,
3080 .splice_read
= generic_file_splice_read
,
/* Forward declarations: delayed-allocation vs. normal
 * address_space_operations tables (defined later in this file). */
3083 static const struct address_space_operations sdfat_da_aops
;
3084 static const struct address_space_operations sdfat_aops
;
/*
 * sdfat_truncate(): shrink @inode from @old_size to the current
 * i_size.  Guards against truncating an unallocated (zeroed) start
 * cluster, cancels defrag work over the affected range, truncates via
 * fsapi_truncate(), updates timestamps and i_blocks (cluster-rounded),
 * and clamps i_size_ondisk / i_size_aligned to the new block-aligned
 * size.
 * NOTE(review): lossy extraction -- returns/braces missing; visible
 * tokens are kept verbatim.
 */
3086 static void sdfat_truncate(struct inode
*inode
, loff_t old_size
)
3088 struct super_block
*sb
= inode
->i_sb
;
3089 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
3090 FS_INFO_T
*fsi
= &(sbi
->fsi
);
3091 unsigned int blocksize
= 1 << inode
->i_blkbits
;
3092 loff_t aligned_size
;
3097 if (SDFAT_I(inode
)->fid
.start_clu
== 0) {
3098 /* Stange statement:
3099 * Empty start_clu != ~0 (not allocated)
3101 sdfat_fs_error(sb
, "tried to truncate zeroed cluster.");
3105 sdfat_debug_check_clusters(inode
);
3107 __cancel_dfr_work(inode
, (loff_t
)i_size_read(inode
), (loff_t
)old_size
, __func__
);
3109 err
= fsapi_truncate(inode
, old_size
, i_size_read(inode
));
3113 inode
->i_ctime
= inode
->i_mtime
= current_time(inode
);
3114 if (IS_DIRSYNC(inode
))
3115 (void) sdfat_sync_inode(inode
);
3117 mark_inode_dirty(inode
);
3120 // inode->i_blocks = ((SDFAT_I(inode)->i_size_ondisk + (fsi->cluster_size - 1))
3121 inode
->i_blocks
= ((i_size_read(inode
) + (fsi
->cluster_size
- 1)) &
3122 ~((loff_t
)fsi
->cluster_size
- 1)) >> inode
->i_blkbits
;
3125 * This protects against truncating a file bigger than it was then
3126 * trying to write into the hole.
3128 * comment by sh.hong:
3129 * This seems to mean 'intra page/block' truncate and writing.
3130 * I couldn't find a reason to change the values prior to fsapi_truncate
3131 * Therefore, I switched the order of operations
3132 * so that it's possible to utilize i_size_ondisk in fsapi_truncate
3135 aligned_size
= i_size_read(inode
);
3136 if (aligned_size
& (blocksize
- 1)) {
3137 aligned_size
|= (blocksize
- 1);
3141 if (SDFAT_I(inode
)->i_size_ondisk
> i_size_read(inode
))
3142 SDFAT_I(inode
)->i_size_ondisk
= aligned_size
;
3144 sdfat_debug_check_clusters(inode
);
3146 if (SDFAT_I(inode
)->i_size_aligned
> i_size_read(inode
))
3147 SDFAT_I(inode
)->i_size_aligned
= aligned_size
;
3149 /* After truncation :
3150 * 1) Delayed allocation is OFF
3151 * i_size = i_size_ondisk <= i_size_aligned
3152 * (useless size var.)
3154 * 2) Delayed allocation is ON
3155 * i_size = i_size_ondisk = i_size_aligned
3156 * (will be block-aligned after write)
3158 * i_size_ondisk < i_size <= i_size_aligned (block_aligned)
3159 * (will be block-aligned after write)
/*
 * inode_operations for sdFAT regular files: setattr/getattr plus the
 * optional virtual-xattr handlers (per-op entries pre-4.9 only).
 */
3165 static const struct inode_operations sdfat_file_inode_operations
= {
3166 .setattr
= sdfat_setattr
,
3167 .getattr
= sdfat_getattr
,
3168 #ifdef CONFIG_SDFAT_VIRTUAL_XATTR
3169 .listxattr
= sdfat_listxattr
,
3170 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
3171 .setxattr
= sdfat_setxattr
,
3172 .getxattr
= sdfat_getxattr
,
3173 .removexattr
= sdfat_removexattr
,
3178 /*======================================================================*/
3179 /* Address Space Operations */
3180 /*======================================================================*/
/* Create-mode values passed to sdfat_bmap(): 0 = lookup only,
 * nonzero = allocate, at block vs. cluster granularity.
 * BLOCK_ADDED() tests whether any allocation mode is set. */
3181 /* 2-level option flag */
3182 #define BMAP_NOT_CREATE 0
3183 #define BMAP_ADD_BLOCK 1
3184 #define BMAP_ADD_CLUSTER 2
3185 #define BLOCK_ADDED(bmap_ops) (bmap_ops)
/*
 * sdfat_bmap(): map logical @sector of @inode to a physical sector.
 * Handles the fixed FAT12/16 root-directory area specially, refuses
 * allocation after a previous EIO, maps (or allocates, when *create
 * requests it) the containing cluster through the defrag layer or
 * fsapi_map_clus(), and returns the physical sector plus the number
 * of contiguous sectors left in the cluster.  *create is downgraded
 * to BMAP_NOT_CREATE when the sector already lies within i_size.
 * NOTE(review): lossy extraction -- returns and closing braces are
 * missing; the trailing "if (iblock <= last mapped-block)" lines are
 * remnants of commented-out pseudo-code.  Tokens kept verbatim.
 */
3186 static int sdfat_bmap(struct inode
*inode
, sector_t sector
, sector_t
*phys
,
3187 unsigned long *mapped_blocks
, int *create
)
3189 struct super_block
*sb
= inode
->i_sb
;
3190 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
3191 FS_INFO_T
*fsi
= &(sbi
->fsi
);
3192 const unsigned long blocksize
= sb
->s_blocksize
;
3193 const unsigned char blocksize_bits
= sb
->s_blocksize_bits
;
3194 sector_t last_block
;
3195 unsigned int cluster
, clu_offset
, sec_offset
;
3201 /* core code should handle EIO */
3203 if (fsi
->prev_eio
&& BLOCK_ADDED(*create
))
3207 if (((fsi
->vol_type
== FAT12
) || (fsi
->vol_type
== FAT16
)) &&
3208 (inode
->i_ino
== SDFAT_ROOT_INO
)) {
3209 if (sector
< (fsi
->dentries_in_root
>>
3210 (sb
->s_blocksize_bits
- DENTRY_SIZE_BITS
))) {
3211 *phys
= sector
+ fsi
->root_start_sector
;
3217 last_block
= (i_size_read(inode
) + (blocksize
- 1)) >> blocksize_bits
;
3218 if ((sector
>= last_block
) && (*create
== BMAP_NOT_CREATE
))
3221 /* Is this block already allocated? */
3222 clu_offset
= sector
>> fsi
->sect_per_clus_bits
; /* cluster offset */
3224 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
3227 if (unlikely(__check_dfr_on(inode
,
3228 (loff_t
)((loff_t
)clu_offset
<< fsi
->cluster_size_bits
),
3229 (loff_t
)((loff_t
)(clu_offset
+ 1) << fsi
->cluster_size_bits
),
3231 err
= __do_dfr_map_cluster(inode
, clu_offset
, &cluster
);
3233 if (*create
& BMAP_ADD_CLUSTER
)
3234 err
= fsapi_map_clus(inode
, clu_offset
, &cluster
, 1);
3236 err
= fsapi_map_clus(inode
, clu_offset
, &cluster
, ALLOC_NOWHERE
);
3246 sdfat_statistics_set_rw(SDFAT_I(inode
)->fid
.flags
,
3247 clu_offset
, *create
& BMAP_ADD_CLUSTER
);
3249 if (!IS_CLUS_EOF(cluster
)) {
3250 /* sector offset in cluster */
3251 sec_offset
= sector
& (fsi
->sect_per_clus
- 1);
3253 *phys
= CLUS_TO_SECT(fsi
, cluster
) + sec_offset
;
3254 *mapped_blocks
= fsi
->sect_per_clus
- sec_offset
;
3258 /* Debug purpose (new clu needed) */
3259 ASSERT((*create
& BMAP_ADD_CLUSTER
) == 0);
3260 ASSERT(sector
>= last_block
);
3264 if (sector
< last_block
)
3265 *create
= BMAP_NOT_CREATE
;
3267 else if (sector
>= last_block
)
3270 if (iblock
<= last mapped
-block
)
3272 *create
= BMAP_NOT_CREATE
3273 else if (iblock
<= last cluster
)
/*
 * sdfat_da_prep_block(): get_block callback used by the
 * delayed-allocation write path (FAT32 only).  Maps existing blocks
 * directly; for new blocks it reserves a cluster (when the block is
 * the first sector of a cluster), marks the buffer new+delayed with a
 * dummy physical address, and advances i_size_aligned (and, when a
 * whole block was added inside an existing cluster, i_size_ondisk).
 * Ends with a sanity check that i_size never exceeds i_size_aligned.
 * NOTE(review): lossy extraction -- returns/braces missing; tokens
 * kept verbatim (Korean comments translated to English).
 */
3280 static int sdfat_da_prep_block(struct inode
*inode
, sector_t iblock
,
3281 struct buffer_head
*bh_result
, int create
)
3283 struct super_block
*sb
= inode
->i_sb
;
3284 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
3285 FS_INFO_T
*fsi
= &(sbi
->fsi
);
3286 unsigned long max_blocks
= bh_result
->b_size
>> inode
->i_blkbits
;
3287 unsigned long mapped_blocks
;
3291 int bmap_create
= create
? BMAP_ADD_BLOCK
: BMAP_NOT_CREATE
;
3297 ASSERT(fsi
->vol_type
== FAT32
);
3299 err
= sdfat_bmap(inode
, iblock
, &phys
, &mapped_blocks
, &bmap_create
);
3302 sdfat_fs_error_ratelimit(sb
, "%s: failed to bmap "
3303 "(iblock:%u, err:%d)", __func__
,
3308 sec_offset
= iblock
& (fsi
->sect_per_clus
- 1);
3311 /* the block in in the mapped cluster boundary */
3312 max_blocks
= min(mapped_blocks
, max_blocks
);
3313 map_bh(bh_result
, sb
, phys
);
3315 BUG_ON(BLOCK_ADDED(bmap_create
) && (sec_offset
== 0));
3317 } else if (create
== 1) {
3318 /* Not exist: new cluster needed */
3319 if (!BLOCK_ADDED(bmap_create
)) {
3320 sector_t last_block
;
3321 last_block
= (i_size_read(inode
) + (sb
->s_blocksize
- 1))
3322 >> sb
->s_blocksize_bits
;
3323 sdfat_fs_error(sb
, "%s: new cluster need, but "
3324 "bmap_create == BMAP_NOT_CREATE(iblock:%lld, "
3325 "last_block:%lld)", __func__
,
3326 (s64
)iblock
, (s64
)last_block
);
3331 // Reserved Cluster (only if iblock is the first sector in a clu)
3332 if (sec_offset
== 0) {
3333 err
= fsapi_reserve_clus(inode
);
3336 sdfat_fs_error_ratelimit(sb
,
3337 "%s: failed to bmap "
3338 "(iblock:%u, err:%d)", __func__
,
3346 map_bh(bh_result
, sb
, ~((sector_t
) 0xffff));
3347 set_buffer_new(bh_result
);
3348 set_buffer_delay(bh_result
);
3351 /* get_block on non-existing addr. with create==0 */
3354 * if below i_size_aligned, keep the delayed mapping for now
3356 * - always 0-filled, so this is not a problem on FAT.
3357 * if the area in between is fully used, it can never be
3358 * invalidated without first being written down to disk
3364 /* Newly added blocks */
3365 if (BLOCK_ADDED(bmap_create
)) {
3366 set_buffer_new(bh_result
);
3368 SDFAT_I(inode
)->i_size_aligned
+= max_blocks
<< sb
->s_blocksize_bits
;
3370 /* i_size_ondisk changes if a block added in the existing cluster */
3371 #define num_clusters(value) ((value) ? (s32)((value - 1) >> fsi->cluster_size_bits) + 1 : 0)
3373 /* FOR GRACEFUL ERROR HANDLING */
3374 if (num_clusters(SDFAT_I(inode
)->i_size_aligned
) !=
3375 num_clusters(SDFAT_I(inode
)->i_size_ondisk
)) {
3376 EMSG("%s: inode(%p) invalid size (create(%d) "
3377 "bmap_create(%d) phys(%lld) aligned(%lld) "
3378 "on_disk(%lld) iblock(%u) sec_off(%d))\n",
3379 __func__
, inode
, create
, bmap_create
, (s64
)phys
,
3380 (s64
)SDFAT_I(inode
)->i_size_aligned
,
3381 (s64
)SDFAT_I(inode
)->i_size_ondisk
,
3384 sdfat_debug_bug_on(1);
3386 SDFAT_I(inode
)->i_size_ondisk
= SDFAT_I(inode
)->i_size_aligned
;
3389 pos
= (iblock
+ 1) << sb
->s_blocksize_bits
;
3390 /* Debug purpose - defensive coding */
3391 ASSERT(SDFAT_I(inode
)->i_size_aligned
== pos
);
3392 if (SDFAT_I(inode
)->i_size_aligned
< pos
)
3393 SDFAT_I(inode
)->i_size_aligned
= pos
;
3396 #ifdef CONFIG_SDFAT_TRACE_IO
3397 /* New page added (ASSERTION: 8 blocks per page) */
3398 if ((sec_offset
& 7) == 0)
3399 sbi
->stat_n_pages_added
++;
3403 /* FOR GRACEFUL ERROR HANDLING */
3404 if (i_size_read(inode
) > SDFAT_I(inode
)->i_size_aligned
) {
3405 sdfat_fs_error_ratelimit(sb
, "%s: invalid size (inode(%p), "
3406 "size(%llu) > aligned(%llu)\n", __func__
, inode
,
3407 i_size_read(inode
), SDFAT_I(inode
)->i_size_aligned
);
3408 sdfat_debug_bug_on(1);
3411 bh_result
->b_size
= max_blocks
<< sb
->s_blocksize_bits
;
/*
 * sdfat_get_block(): standard get_block_t for the non-DA path (and
 * for resolving delayed buffers).  Maps @iblock via sdfat_bmap()
 * (allocating a cluster when @create), then advances i_size_ondisk
 * for newly added/delayed blocks, keeps i_size_aligned >=
 * i_size_ondisk, clears the delay flag once a real mapping exists,
 * and finally maps the buffer head with the physical sector and the
 * contiguous run length.
 * NOTE(review): lossy extraction -- returns/braces missing; tokens
 * kept verbatim (Korean comments translated to English).
 */
3418 static int sdfat_get_block(struct inode
*inode
, sector_t iblock
,
3419 struct buffer_head
*bh_result
, int create
)
3421 struct super_block
*sb
= inode
->i_sb
;
3422 unsigned long max_blocks
= bh_result
->b_size
>> inode
->i_blkbits
;
3424 unsigned long mapped_blocks
;
3427 int bmap_create
= create
? BMAP_ADD_CLUSTER
: BMAP_NOT_CREATE
;
3430 err
= sdfat_bmap(inode
, iblock
, &phys
, &mapped_blocks
, &bmap_create
);
3433 sdfat_fs_error_ratelimit(sb
, "%s: failed to bmap "
3434 "(inode:%p iblock:%u, err:%d)",
3435 __func__
, inode
, (u32
)iblock
, err
);
3440 max_blocks
= min(mapped_blocks
, max_blocks
);
3442 /* Treat newly added block / cluster */
3443 if (BLOCK_ADDED(bmap_create
) || buffer_delay(bh_result
)) {
3445 /* Update i_size_ondisk */
3446 pos
= (iblock
+ 1) << sb
->s_blocksize_bits
;
3447 if (SDFAT_I(inode
)->i_size_ondisk
< pos
) {
3449 if ((pos
- SDFAT_I(inode
)->i_size_ondisk
) > bh_result
->b_size
) {
3450 /* This never happens without DA */
3451 MMSG("Jumping get_block\n");
3454 SDFAT_I(inode
)->i_size_ondisk
= pos
;
3455 sdfat_debug_check_clusters(inode
);
3458 if (BLOCK_ADDED(bmap_create
)) {
3460 * create == 1 only if iblock > i_size
3465 * if this races with a truncate,
3466 * i_size < (i_block position) while buffer_delay()
3469 * only an already-allocated area is rewritten, so it
3470 * is not a big problem; but in that case i_size_aligned
3474 /* FOR GRACEFUL ERROR HANDLING */
3475 if (buffer_delay(bh_result
) &&
3476 (pos
> SDFAT_I(inode
)->i_size_aligned
)) {
3477 sdfat_fs_error(sb
, "requested for bmap "
3478 "out of range(pos:(%llu)>i_size_aligned(%llu)\n",
3479 pos
, SDFAT_I(inode
)->i_size_aligned
);
3480 sdfat_debug_bug_on(1);
3484 set_buffer_new(bh_result
);
3487 * adjust i_size_aligned if i_size_ondisk is
3488 * bigger than it. (i.e. non-DA)
3490 if (SDFAT_I(inode
)->i_size_ondisk
>
3491 SDFAT_I(inode
)->i_size_aligned
) {
3492 SDFAT_I(inode
)->i_size_aligned
=
3493 SDFAT_I(inode
)->i_size_ondisk
;
3497 if (buffer_delay(bh_result
))
3498 clear_buffer_delay(bh_result
);
3502 if (SDFAT_I(inode
)->i_size_ondisk
>
3503 SDFAT_I(inode
)->i_size_aligned
) {
3504 /* Only after truncate
3505 * and the two size variables should indicate
3508 unsigned int blocksize
= 1 << inode
->i_blkbits
;
3509 BUG_ON(SDFAT_I(inode
)->i_size_ondisk
-
3510 SDFAT_I(inode
)->i_size_aligned
>= blocksize
);
3514 map_bh(bh_result
, sb
, phys
);
3517 bh_result
->b_size
= max_blocks
<< sb
->s_blocksize_bits
;
/*
 * sdfat_readpage(): ->readpage, delegates to mpage_readpage() with
 * sdfat_get_block as the block mapper.
 */
3523 static int sdfat_readpage(struct file
*file
, struct page
*page
)
3527 ret
= mpage_readpage(page
, sdfat_get_block
);
/*
 * sdfat_readpages(): ->readpages (readahead), delegates to
 * mpage_readpages() with sdfat_get_block as the block mapper.
 */
3531 static int sdfat_readpages(struct file
*file
, struct address_space
*mapping
,
3532 struct list_head
*pages
, unsigned int nr_pages
)
3536 ret
= mpage_readpages(mapping
, pages
, nr_pages
, sdfat_get_block
);
/*
 * sdfat_submit_fullpage_bio(): build and submit a one-segment write
 * bio covering @length bytes of @page starting at device @sector.
 * bio_alloc(GFP_NOIO, 1) cannot fail thanks to the mempool guarantee.
 * Completion is handled by sdfat_writepage_end_io.
 */
3540 static inline void sdfat_submit_fullpage_bio(struct block_device
*bdev
,
3541 sector_t sector
, unsigned int length
,
3542 struct page
*page
, struct writeback_control
*wbc
)
3544 /* Single page bio submit */
3547 BUG_ON((length
> PAGE_SIZE
) || (length
== 0));
3550 * If __GFP_WAIT is set, then bio_alloc will always be able to allocate
3551 * a bio. This is due to the mempool guarantees. To make this work, callers
3552 * must never allocate more than 1 bio at a time from this pool.
3554 * #define GFP_NOIO (__GFP_WAIT)
3556 bio
= bio_alloc(GFP_NOIO
, 1);
3558 bio_set_dev(bio
, bdev
);
3560 bio
->bi_io_vec
[0].bv_page
= page
; /* Inline vec */
3561 bio
->bi_io_vec
[0].bv_len
= length
; /* PAGE_SIZE */
3562 bio
->bi_io_vec
[0].bv_offset
= 0;
3563 __sdfat_set_bio_iterate(bio
, sector
, length
, 0, 0);
3565 bio
->bi_end_io
= sdfat_writepage_end_io
;
3566 __sdfat_submit_bio_write(bio
, wbc
);
/*
 * sdfat_writepage(): ->writepage with a fast path.  When the whole
 * page maps to physically contiguous, dirty, up-to-date blocks it is
 * submitted as a single bio (sdfat_submit_fullpage_bio); otherwise
 * ("confused" cases: small clusters, partial dirtiness,
 * discontiguous mapping, missing buffers) it falls back to
 * block_write_full_page().  Handles the EOF page by zero-filling
 * past i_size, allocates delayed blocks via sdfat_get_block(), and
 * strips the MAPPED flag from buffers of defrag victims so a
 * concurrent write_begin re-resolves them.
 * NOTE(review): lossy extraction -- labels/braces/returns missing;
 * visible tokens are kept verbatim.
 */
3569 static int sdfat_writepage(struct page
*page
, struct writeback_control
*wbc
)
3571 struct inode
* const inode
= page
->mapping
->host
;
3572 struct super_block
*sb
= inode
->i_sb
;
3573 loff_t i_size
= i_size_read(inode
);
3574 const pgoff_t end_index
= i_size
>> PAGE_SHIFT
;
3575 const unsigned int blocks_per_page
= PAGE_SIZE
>> inode
->i_blkbits
;
3576 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
3577 struct buffer_head
*bh
, *head
;
3578 sector_t block
, block_0
, last_phys
;
3580 unsigned int nr_blocks_towrite
= blocks_per_page
;
3582 /* Don't distinguish 0-filled/clean block.
3583 * Just write back the whole page
3585 if (fsi
->cluster_size
< PAGE_SIZE
)
3588 if (!PageUptodate(page
)) {
3589 MMSG("%s: Not up-to-date page -> block_write_full_page\n",
3594 if (page
->index
>= end_index
) {
3595 /* last page or outside i_size */
3596 unsigned int offset
= i_size
& (PAGE_SIZE
-1);
3598 /* If a truncation is in progress */
3599 if (page
->index
> end_index
|| !offset
)
3602 /* 0-fill after i_size */
3603 zero_user_segment(page
, offset
, PAGE_SIZE
);
3606 if (!page_has_buffers(page
)) {
3607 MMSG("WP: No buffers -> block_write_full_page\n");
3611 block
= (sector_t
)page
->index
<< (PAGE_SHIFT
- inode
->i_blkbits
);
3612 block_0
= block
; /* first block */
3613 head
= page_buffers(page
);
3618 BUG_ON(buffer_locked(bh
));
3620 if (!buffer_dirty(bh
) || !buffer_uptodate(bh
)) {
3621 if (nr_blocks_towrite
== blocks_per_page
)
3622 nr_blocks_towrite
= (unsigned int) (block
- block_0
);
3624 BUG_ON(nr_blocks_towrite
>= blocks_per_page
);
3626 // !uptodate but dirty??
3627 if (buffer_dirty(bh
))
3630 // Nothing to writeback in this block
3631 bh
= bh
->b_this_page
;
3636 if (nr_blocks_towrite
!= blocks_per_page
)
3637 // Dirty -> Non-dirty -> Dirty again case
3641 if (!buffer_mapped(bh
) || buffer_delay(bh
)) {
3642 BUG_ON(bh
->b_size
!= (1 << (inode
->i_blkbits
)));
3643 ret
= sdfat_get_block(inode
, block
, bh
, 1);
3647 if (buffer_new(bh
)) {
3648 clear_buffer_new(bh
);
3649 __sdfat_clean_bdev_aliases(bh
->b_bdev
, bh
->b_blocknr
);
3653 /* continuity check */
3654 if (((last_phys
+ 1) != bh
->b_blocknr
) && (last_phys
!= 0)) {
3655 DMSG("Non-contiguous block mapping in single page");
3659 last_phys
= bh
->b_blocknr
;
3660 bh
= bh
->b_this_page
;
3662 } while (bh
!= head
);
3664 if (nr_blocks_towrite
== 0) {
3665 DMSG("Page dirty but no dirty bh? alloc_208\n");
3672 clear_buffer_dirty(bh
);
3673 bh
= bh
->b_this_page
;
3674 } while (bh
!= head
);
3676 BUG_ON(PageWriteback(page
));
3677 set_page_writeback(page
);
3680 * Turn off MAPPED flag in victim's bh if defrag on.
3681 * Another write_begin can starts after get_block for defrag victims called.
3682 * In this case, write_begin calls get_block and get original block number
3683 * and previous defrag will be canceled.
3685 if (unlikely(__check_dfr_on(inode
,
3686 (loff_t
)(page
->index
<< PAGE_SHIFT
),
3687 (loff_t
)((page
->index
+ 1) << PAGE_SHIFT
),
3690 clear_buffer_mapped(bh
);
3691 bh
= bh
->b_this_page
;
3692 } while (bh
!= head
);
3695 // Trace # of pages queued (Approx.)
3696 atomic_inc(&SDFAT_SB(sb
)->stat_n_pages_queued
);
3698 sdfat_submit_fullpage_bio(head
->b_bdev
,
3699 head
->b_blocknr
<< (sb
->s_blocksize_bits
- SECTOR_SIZE_BITS
),
3700 nr_blocks_towrite
<< inode
->i_blkbits
,
3708 #ifdef CONFIG_SDFAT_TRACE_IO
3709 SDFAT_SB(sb
)->stat_n_pages_confused
++;
3711 ret
= block_write_full_page(page
, sdfat_get_block
, wbc
);
/*
 * sdfat_da_writepages(): ->writepages for the delayed-allocation
 * aops table.  With the "adj_req" option and aligned-mpage support
 * compiled in, uses the sdFAT mpage writer; otherwise falls back to
 * generic_writepages() (which invokes sdfat_writepage per page).
 */
3715 static int sdfat_da_writepages(struct address_space
*mapping
,
3716 struct writeback_control
*wbc
)
3718 MMSG("%s(inode:%p) with nr_to_write = 0x%08lx "
3719 "(ku %d, bg %d, tag %d, rc %d )\n",
3720 __func__
, mapping
->host
, wbc
->nr_to_write
,
3721 wbc
->for_kupdate
, wbc
->for_background
, wbc
->tagged_writepages
,
3724 ASSERT(mapping
->a_ops
== &sdfat_da_aops
);
3726 #ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
3727 if (SDFAT_SB(mapping
->host
->i_sb
)->options
.adj_req
)
3728 return sdfat_mpage_writepages(mapping
, wbc
, sdfat_get_block
);
3730 return generic_writepages(mapping
, wbc
);
/*
 * sdfat_writepages(): ->writepages for the normal (non-DA) aops
 * table.  Same adj_req special-case as the DA variant; default path
 * is mpage_writepages() with sdfat_get_block.
 */
3733 static int sdfat_writepages(struct address_space
*mapping
,
3734 struct writeback_control
*wbc
)
3736 MMSG("%s(inode:%p) with nr_to_write = 0x%08lx "
3737 "(ku %d, bg %d, tag %d, rc %d )\n",
3738 __func__
, mapping
->host
, wbc
->nr_to_write
,
3739 wbc
->for_kupdate
, wbc
->for_background
, wbc
->tagged_writepages
,
3742 ASSERT(mapping
->a_ops
== &sdfat_aops
);
3744 #ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
3745 if (SDFAT_SB(mapping
->host
->i_sb
)->options
.adj_req
)
3746 return sdfat_mpage_writepages(mapping
, wbc
, sdfat_get_block
);
3748 return mpage_writepages(mapping
, wbc
, sdfat_get_block
);
/*
 * sdfat_write_failed(): clean up after a failed/short write that
 * would have extended the file to @to.  Drops the pagecache beyond
 * i_size and truncates the speculatively extended on-disk state back
 * (from i_size_aligned).
 */
3751 static void sdfat_write_failed(struct address_space
*mapping
, loff_t to
)
3753 struct inode
*inode
= mapping
->host
;
3755 if (to
> i_size_read(inode
)) {
3756 __sdfat_truncate_pagecache(inode
, to
, i_size_read(inode
));
3757 sdfat_truncate(inode
, SDFAT_I(inode
)->i_size_aligned
);
/*
 * sdfat_check_writable(): refuse writes when the backing-device info
 * is no longer valid (e.g. media surprise-removed), as reported by
 * fsapi_check_bdi_valid().
 * NOTE(review): lossy extraction -- return values not visible here.
 */
3761 static int sdfat_check_writable(struct super_block
*sb
)
3763 if (fsapi_check_bdi_valid(sb
))
/*
 * __sdfat_write_begin(): shared ->write_begin core.  Cancels defrag
 * work overlapping the write range, verifies the volume is still
 * writable, then delegates to cont_write_begin() with the
 * caller-chosen @get_block and size cursor (@bytes: i_size_aligned
 * for DA, i_size_ondisk otherwise).  On failure the speculative
 * extension is rolled back via sdfat_write_failed().
 * NOTE(review): lossy extraction -- returns missing; tokens verbatim.
 */
3772 static int __sdfat_write_begin(struct file
*file
, struct address_space
*mapping
,
3773 loff_t pos
, unsigned int len
,
3774 unsigned int flags
, struct page
**pagep
,
3775 void **fsdata
, get_block_t
*get_block
,
3776 loff_t
*bytes
, const char *fname
)
3778 struct super_block
*sb
= mapping
->host
->i_sb
;
3781 __cancel_dfr_work(mapping
->host
, pos
, (loff_t
)(pos
+ len
), fname
);
3783 ret
= sdfat_check_writable(sb
);
3784 if (unlikely(ret
< 0))
3788 ret
= cont_write_begin(file
, mapping
, pos
, len
, flags
, pagep
, fsdata
,
3792 sdfat_write_failed(mapping
, pos
+len
);
3798 static int sdfat_da_write_begin(struct file
*file
, struct address_space
*mapping
,
3799 loff_t pos
, unsigned int len
, unsigned int flags
,
3800 struct page
**pagep
, void **fsdata
)
3802 return __sdfat_write_begin(file
, mapping
, pos
, len
, flags
,
3803 pagep
, fsdata
, sdfat_da_prep_block
,
3804 &SDFAT_I(mapping
->host
)->i_size_aligned
,
3809 static int sdfat_write_begin(struct file
*file
, struct address_space
*mapping
,
3810 loff_t pos
, unsigned int len
, unsigned int flags
,
3811 struct page
**pagep
, void **fsdata
)
3813 return __sdfat_write_begin(file
, mapping
, pos
, len
, flags
,
3814 pagep
, fsdata
, sdfat_get_block
,
3815 &SDFAT_I(mapping
->host
)->i_size_ondisk
,
3819 static int sdfat_write_end(struct file
*file
, struct address_space
*mapping
,
3820 loff_t pos
, unsigned int len
, unsigned int copied
,
3821 struct page
*pagep
, void *fsdata
)
3823 struct inode
*inode
= mapping
->host
;
3824 FILE_ID_T
*fid
= &(SDFAT_I(inode
)->fid
);
3827 err
= generic_write_end(file
, mapping
, pos
, len
, copied
, pagep
, fsdata
);
3829 /* FOR GRACEFUL ERROR HANDLING */
3830 if (SDFAT_I(inode
)->i_size_aligned
< i_size_read(inode
)) {
3831 sdfat_fs_error(inode
->i_sb
, "invalid size(size(%llu) "
3832 "> aligned(%llu)\n", i_size_read(inode
),
3833 SDFAT_I(inode
)->i_size_aligned
);
3834 sdfat_debug_bug_on(1);
3838 sdfat_write_failed(mapping
, pos
+len
);
3840 if (!(err
< 0) && !(fid
->attr
& ATTR_ARCHIVE
)) {
3841 inode
->i_mtime
= inode
->i_ctime
= current_time(inode
);
3842 fid
->attr
|= ATTR_ARCHIVE
;
3843 mark_inode_dirty(inode
);
3849 static inline ssize_t
__sdfat_direct_IO(int rw
, struct kiocb
*iocb
,
3850 struct inode
*inode
, void *iov_u
, loff_t offset
,
3851 loff_t count
, unsigned long nr_segs
)
3853 struct address_space
*mapping
= inode
->i_mapping
;
3854 loff_t size
= offset
+ count
;
3859 * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
3860 * so we need to update the ->i_size_aligned to block boundary.
3862 * But we must fill the remaining area or hole by nul for
3863 * updating ->i_size_aligned
3865 * Return 0, and fallback to normal buffered write.
3867 if (SDFAT_I(inode
)->i_size_aligned
< size
)
3872 * sdFAT need to use the DIO_LOCKING for avoiding the race
3873 * condition of sdfat_get_block() and ->truncate().
3875 ret
= __sdfat_blkdev_direct_IO(rw
, iocb
, inode
, iov_u
, offset
, nr_segs
);
3876 if (ret
< 0 && (rw
& WRITE
))
3877 sdfat_write_failed(mapping
, size
);
3882 static const struct address_space_operations sdfat_aops
= {
3883 .readpage
= sdfat_readpage
,
3884 .readpages
= sdfat_readpages
,
3885 .writepage
= sdfat_writepage
,
3886 .writepages
= sdfat_writepages
,
3887 .write_begin
= sdfat_write_begin
,
3888 .write_end
= sdfat_write_end
,
3889 .direct_IO
= sdfat_direct_IO
,
3890 .bmap
= sdfat_aop_bmap
3893 static const struct address_space_operations sdfat_da_aops
= {
3894 .readpage
= sdfat_readpage
,
3895 .readpages
= sdfat_readpages
,
3896 .writepage
= sdfat_writepage
,
3897 .writepages
= sdfat_da_writepages
,
3898 .write_begin
= sdfat_da_write_begin
,
3899 .write_end
= sdfat_write_end
,
3900 .direct_IO
= sdfat_direct_IO
,
3901 .bmap
= sdfat_aop_bmap
3904 /*======================================================================*/
3905 /* Super Operations */
3906 /*======================================================================*/
3908 static inline unsigned long sdfat_hash(loff_t i_pos
)
3910 return hash_32(i_pos
, SDFAT_HASH_BITS
);
3913 static void sdfat_attach(struct inode
*inode
, loff_t i_pos
)
3915 struct sdfat_sb_info
*sbi
= SDFAT_SB(inode
->i_sb
);
3916 struct hlist_head
*head
= sbi
->inode_hashtable
+ sdfat_hash(i_pos
);
3918 spin_lock(&sbi
->inode_hash_lock
);
3919 SDFAT_I(inode
)->i_pos
= i_pos
;
3920 hlist_add_head(&SDFAT_I(inode
)->i_hash_fat
, head
);
3921 spin_unlock(&sbi
->inode_hash_lock
);
3924 static void sdfat_detach(struct inode
*inode
)
3926 struct sdfat_sb_info
*sbi
= SDFAT_SB(inode
->i_sb
);
3928 spin_lock(&sbi
->inode_hash_lock
);
3929 hlist_del_init(&SDFAT_I(inode
)->i_hash_fat
);
3930 SDFAT_I(inode
)->i_pos
= 0;
3931 spin_unlock(&sbi
->inode_hash_lock
);
3935 /* doesn't deal with root inode */
3936 static int sdfat_fill_inode(struct inode
*inode
, const FILE_ID_T
*fid
)
3938 struct sdfat_sb_info
*sbi
= SDFAT_SB(inode
->i_sb
);
3939 FS_INFO_T
*fsi
= &(sbi
->fsi
);
3941 u64 size
= fid
->size
;
3943 memcpy(&(SDFAT_I(inode
)->fid
), fid
, sizeof(FILE_ID_T
));
3945 SDFAT_I(inode
)->i_pos
= 0;
3946 SDFAT_I(inode
)->target
= NULL
;
3947 inode
->i_uid
= sbi
->options
.fs_uid
;
3948 inode
->i_gid
= sbi
->options
.fs_gid
;
3949 inode_inc_iversion(inode
);
3950 inode
->i_generation
= get_seconds();
3952 if (fsapi_read_inode(inode
, &info
) < 0) {
3953 MMSG("%s: failed to read stat!\n", __func__
);
3957 if (info
.Attr
& ATTR_SUBDIR
) { /* directory */
3958 inode
->i_generation
&= ~1;
3959 inode
->i_mode
= sdfat_make_mode(sbi
, info
.Attr
, S_IRWXUGO
);
3960 inode
->i_op
= &sdfat_dir_inode_operations
;
3961 inode
->i_fop
= &sdfat_dir_operations
;
3963 set_nlink(inode
, info
.NumSubdirs
);
3964 } else if (info
.Attr
& ATTR_SYMLINK
) { /* symbolic link */
3965 inode
->i_op
= &sdfat_symlink_inode_operations
;
3966 inode
->i_generation
|= 1;
3967 inode
->i_mode
= sdfat_make_mode(sbi
, info
.Attr
, S_IRWXUGO
);
3968 } else { /* regular file */
3969 inode
->i_generation
|= 1;
3970 inode
->i_mode
= sdfat_make_mode(sbi
, info
.Attr
, S_IRWXUGO
);
3971 inode
->i_op
= &sdfat_file_inode_operations
;
3972 inode
->i_fop
= &sdfat_file_operations
;
3974 if (sbi
->options
.improved_allocation
& SDFAT_ALLOC_DELAY
)
3975 inode
->i_mapping
->a_ops
= &sdfat_da_aops
;
3977 inode
->i_mapping
->a_ops
= &sdfat_aops
;
3979 inode
->i_mapping
->nrpages
= 0;
3984 * Use fid->size instead of info.Size
3985 * because info.Size means the value saved on disk
3987 i_size_write(inode
, size
);
3989 /* ondisk and aligned size should be aligned with block size */
3990 if (size
& (inode
->i_sb
->s_blocksize
- 1)) {
3991 size
|= (inode
->i_sb
->s_blocksize
- 1);
3995 SDFAT_I(inode
)->i_size_aligned
= size
;
3996 SDFAT_I(inode
)->i_size_ondisk
= size
;
3997 sdfat_debug_check_clusters(inode
);
3999 sdfat_save_attr(inode
, info
.Attr
);
4001 inode
->i_blocks
= ((i_size_read(inode
) + (fsi
->cluster_size
- 1))
4002 & ~((loff_t
)fsi
->cluster_size
- 1)) >> inode
->i_blkbits
;
4004 sdfat_time_fat2unix(sbi
, &inode
->i_mtime
, &info
.ModifyTimestamp
);
4005 sdfat_time_fat2unix(sbi
, &inode
->i_ctime
, &info
.CreateTimestamp
);
4006 sdfat_time_fat2unix(sbi
, &inode
->i_atime
, &info
.AccessTimestamp
);
4008 __init_dfr_info(inode
);
4013 static struct inode
*sdfat_build_inode(struct super_block
*sb
,
4014 const FILE_ID_T
*fid
, loff_t i_pos
) {
4015 struct inode
*inode
;
4018 inode
= sdfat_iget(sb
, i_pos
);
4021 inode
= new_inode(sb
);
4023 inode
= ERR_PTR(-ENOMEM
);
4026 inode
->i_ino
= iunique(sb
, SDFAT_ROOT_INO
);
4027 inode_set_iversion(inode
, 1);
4028 err
= sdfat_fill_inode(inode
, fid
);
4031 inode
= ERR_PTR(err
);
4034 sdfat_attach(inode
, i_pos
);
4035 insert_inode_hash(inode
);
4040 static struct inode
*sdfat_alloc_inode(struct super_block
*sb
)
4042 struct sdfat_inode_info
*ei
;
4044 ei
= kmem_cache_alloc(sdfat_inode_cachep
, GFP_NOFS
);
4047 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
4048 init_rwsem(&ei
->truncate_lock
);
4050 return &ei
->vfs_inode
;
4053 static void sdfat_free_inode(struct inode
*inode
)
4055 if (SDFAT_I(inode
)->target
) {
4056 kfree(SDFAT_I(inode
)->target
);
4057 SDFAT_I(inode
)->target
= NULL
;
4060 kmem_cache_free(sdfat_inode_cachep
, SDFAT_I(inode
));
4063 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
4064 /* Use free_inode instead of destroy_inode */
4065 #define sdfat_destroy_inode (NULL)
4066 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
4067 static void sdfat_i_callback(struct rcu_head
*head
)
4069 struct inode
*inode
= container_of(head
, struct inode
, i_rcu
);
4071 sdfat_free_inode(inode
);
4074 static void sdfat_destroy_inode(struct inode
*inode
)
4076 call_rcu(&inode
->i_rcu
, sdfat_i_callback
);
4079 static void sdfat_destroy_inode(struct inode
*inode
)
4081 sdfat_free_inode(inode
);
4085 static int __sdfat_write_inode(struct inode
*inode
, int sync
)
4087 struct super_block
*sb
= inode
->i_sb
;
4088 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4091 if (inode
->i_ino
== SDFAT_ROOT_INO
)
4094 info
.Attr
= sdfat_make_attr(inode
);
4095 info
.Size
= i_size_read(inode
);
4097 sdfat_time_unix2fat(sbi
, &inode
->i_mtime
, &info
.ModifyTimestamp
);
4098 sdfat_time_unix2fat(sbi
, &inode
->i_ctime
, &info
.CreateTimestamp
);
4099 sdfat_time_unix2fat(sbi
, &inode
->i_atime
, &info
.AccessTimestamp
);
4101 if (!__support_write_inode_sync(sb
))
4104 /* FIXME : Do we need handling error? */
4105 return fsapi_write_inode(inode
, &info
, sync
);
/* Synchronously flush one inode's metadata to the media. */
static int sdfat_sync_inode(struct inode *inode)
{
	/* sync == 1 forces the write to complete before returning */
	return __sdfat_write_inode(inode, 1);
}
4113 static int sdfat_write_inode(struct inode
*inode
, struct writeback_control
*wbc
)
4115 return __sdfat_write_inode(inode
, wbc
->sync_mode
== WB_SYNC_ALL
);
4118 static void sdfat_evict_inode(struct inode
*inode
)
4120 truncate_inode_pages_final(&inode
->i_data
);
4122 if (!inode
->i_nlink
) {
4123 loff_t old_size
= i_size_read(inode
);
4125 i_size_write(inode
, 0);
4127 SDFAT_I(inode
)->fid
.size
= old_size
;
4129 __cancel_dfr_work(inode
, 0, (loff_t
)old_size
, __func__
);
4131 /* TO CHECK evicting directory works correctly */
4132 MMSG("%s: inode(%p) evict %s (size(%llu) to zero)\n",
4134 S_ISDIR(inode
->i_mode
) ? "directory" : "file",
4136 fsapi_truncate(inode
, old_size
, 0);
4139 invalidate_inode_buffers(inode
);
4141 fsapi_invalidate_extent(inode
);
4142 sdfat_detach(inode
);
4144 /* after end of this function, caller will remove inode hash */
4145 /* remove_inode_hash(inode); */
4148 static void sdfat_free_sb_info(struct sdfat_sb_info
*sbi
)
4150 if (sbi
->nls_disk
) {
4151 unload_nls(sbi
->nls_disk
);
4152 sbi
->nls_disk
= NULL
;
4153 sbi
->options
.codepage
= sdfat_default_codepage
;
4156 unload_nls(sbi
->nls_io
);
4159 if (sbi
->options
.iocharset
!= sdfat_default_iocharset
) {
4160 kfree(sbi
->options
.iocharset
);
4161 sbi
->options
.iocharset
= sdfat_default_iocharset
;
4164 if (sbi
->use_vmalloc
) {
4171 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
4172 static void delayed_free(struct rcu_head
*p
)
4174 struct sdfat_sb_info
*sbi
= container_of(p
, struct sdfat_sb_info
, rcu
);
4176 sdfat_free_sb_info(sbi
);
4179 static void __sdfat_destroy_sb_info(struct super_block
*sb
)
4181 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4183 call_rcu(&sbi
->rcu
, delayed_free
);
4186 static void __sdfat_destroy_sb_info(struct super_block
*sb
)
4188 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4190 sdfat_free_sb_info(sbi
);
4191 sb
->s_fs_info
= NULL
;
4195 static void sdfat_destroy_sb_info(struct super_block
*sb
)
4197 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4199 kobject_del(&sbi
->sb_kobj
);
4200 kobject_put(&sbi
->sb_kobj
);
4202 __sdfat_destroy_sb_info(sb
);
4205 static void sdfat_put_super(struct super_block
*sb
)
4207 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4210 sdfat_log_msg(sb
, KERN_INFO
, "trying to unmount...");
4212 __cancel_delayed_work_sync(sbi
);
4214 if (__is_sb_dirty(sb
))
4215 sdfat_write_super(sb
);
4217 __free_dfr_mem_if_required(sb
);
4218 err
= fsapi_umount(sb
);
4220 sdfat_destroy_sb_info(sb
);
4222 sdfat_log_msg(sb
, KERN_INFO
, "unmounted successfully! %s",
4223 err
? "(with previous I/O errors)" : "");
4226 static inline void __flush_delayed_meta(struct super_block
*sb
, s32 sync
)
4228 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
4229 fsapi_cache_flush(sb
, sync
);
4235 static void sdfat_write_super(struct super_block
*sb
)
4243 #ifdef CONFIG_SDFAT_DFR
4244 if (atomic_read(&(SDFAT_SB(sb
)->dfr_info
.stat
)) == DFR_SB_STAT_VALID
)
4245 fsapi_dfr_update_fat_next(sb
);
4248 /* flush delayed FAT/DIR dirty */
4249 __flush_delayed_meta(sb
, 0);
4252 fsapi_sync_fs(sb
, 0);
4258 /* Issuing bdev requests is needed
4259 * to guarantee DIR updates in time
4260 * whether w/ or w/o delayed DIR dirty feature.
4261 * (otherwise DIR updates could be delayed for 5 + 5 secs at max.)
4263 sync_blockdev(sb
->s_bdev
);
4265 #if (defined(CONFIG_SDFAT_DFR) && defined(CONFIG_SDFAT_DFR_DEBUG))
4267 fsapi_dfr_spo_test(sb
, DFR_SPO_FAT_NEXT
, __func__
);
4269 MMSG("BD: sdfat_write_super (bdev_sync for %ld ms)\n",
4270 (jiffies
- time
) * 1000 / HZ
);
4274 static void __dfr_update_fat_next(struct super_block
*sb
)
4276 #ifdef CONFIG_SDFAT_DFR
4277 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4279 if (sbi
->options
.defrag
&&
4280 (atomic_read(&sbi
->dfr_info
.stat
) == DFR_SB_STAT_VALID
)) {
4281 fsapi_dfr_update_fat_next(sb
);
4286 static void __dfr_update_fat_prev(struct super_block
*sb
, int wait
)
4288 #ifdef CONFIG_SDFAT_DFR
4289 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4290 struct defrag_info
*sb_dfr
= &sbi
->dfr_info
;
4291 /* static time available? */
4292 static int time
; /* initialized by zero */
4293 int uevent
= 0, total
= 0, clean
= 0, full
= 0;
4294 int spent
= jiffies
- time
;
4296 if (!(sbi
->options
.defrag
&& wait
))
4300 /* Update FAT for defrag */
4301 if (atomic_read(&(sbi
->dfr_info
.stat
)) == DFR_SB_STAT_VALID
) {
4303 fsapi_dfr_update_fat_prev(sb
, 0);
4305 /* flush delayed FAT/DIR dirty */
4306 __flush_delayed_meta(sb
, 0);
4308 /* Complete defrag req */
4309 fsapi_sync_fs(sb
, 1);
4310 atomic_set(&sb_dfr
->stat
, DFR_SB_STAT_REQ
);
4311 complete_all(&sbi
->dfr_complete
);
4312 } else if (((spent
< 0) || (spent
> DFR_DEFAULT_TIMEOUT
)) &&
4313 (atomic_read(&(sbi
->dfr_info
.stat
)) == DFR_SB_STAT_IDLE
)) {
4314 uevent
= fsapi_dfr_check_dfr_required(sb
, &total
, &clean
, &full
);
4320 kobject_uevent(&SDFAT_SB(sb
)->sb_kobj
, KOBJ_CHANGE
);
4321 dfr_debug("uevent for defrag_daemon, total_au %d, "
4322 "clean_au %d, full_au %d", total
, clean
, full
);
4327 static int sdfat_sync_fs(struct super_block
*sb
, int wait
)
4331 /* If there are some dirty buffers in the bdev inode */
4332 if (__is_sb_dirty(sb
)) {
4336 __dfr_update_fat_next(sb
);
4338 err
= fsapi_sync_fs(sb
, 1);
4340 #if (defined(CONFIG_SDFAT_DFR) && defined(CONFIG_SDFAT_DFR_DEBUG))
4342 fsapi_dfr_spo_test(sb
, DFR_SPO_FAT_NEXT
, __func__
);
4348 __dfr_update_fat_prev(sb
, wait
);
4353 static int sdfat_statfs(struct dentry
*dentry
, struct kstatfs
*buf
)
4357 * fixed the slow-call problem because of volume-lock contention.
4359 struct super_block
*sb
= dentry
->d_sb
;
4360 u64 id
= huge_encode_dev(sb
->s_bdev
->bd_dev
);
4361 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
4364 /* fsapi_statfs will try to get a volume lock if needed */
4365 if (fsapi_statfs(sb
, &info
))
4369 sdfat_msg(sb
, KERN_INFO
, "called statfs with previous"
4370 " I/O error(0x%02X).", fsi
->prev_eio
);
4372 buf
->f_type
= sb
->s_magic
;
4373 buf
->f_bsize
= info
.ClusterSize
;
4374 buf
->f_blocks
= info
.NumClusters
;
4375 buf
->f_bfree
= info
.FreeClusters
;
4376 buf
->f_bavail
= info
.FreeClusters
;
4377 buf
->f_fsid
.val
[0] = (u32
)id
;
4378 buf
->f_fsid
.val
[1] = (u32
)(id
>> 32);
4379 /* Unicode utf8 255 characters */
4380 buf
->f_namelen
= MAX_NAME_LENGTH
* MAX_CHARSET_SIZE
;
4385 static int sdfat_remount(struct super_block
*sb
, int *flags
, char *data
)
4387 unsigned long prev_sb_flags
;
4388 char *orig_data
= kstrdup(data
, GFP_KERNEL
);
4389 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4390 FS_INFO_T
*fsi
= &(sbi
->fsi
);
4392 *flags
|= SB_NODIRATIME
;
4394 prev_sb_flags
= sb
->s_flags
;
4396 sdfat_remount_syncfs(sb
);
4398 fsapi_set_vol_flags(sb
, VOL_CLEAN
, 1);
4400 sdfat_log_msg(sb
, KERN_INFO
, "re-mounted(%s->%s), eio=0x%x, Opts: %s",
4401 (prev_sb_flags
& SB_RDONLY
) ? "ro" : "rw",
4402 (*flags
& SB_RDONLY
) ? "ro" : "rw",
4403 fsi
->prev_eio
, orig_data
);
4408 static int __sdfat_show_options(struct seq_file
*m
, struct super_block
*sb
)
4410 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4411 struct sdfat_mount_options
*opts
= &sbi
->options
;
4412 FS_INFO_T
*fsi
= &(sbi
->fsi
);
4414 /* Show partition info */
4415 seq_printf(m
, ",fs=%s", sdfat_get_vol_type_str(fsi
->vol_type
));
4417 seq_printf(m
, ",eio=0x%x", fsi
->prev_eio
);
4418 if (!uid_eq(opts
->fs_uid
, GLOBAL_ROOT_UID
))
4419 seq_printf(m
, ",uid=%u",
4420 from_kuid_munged(&init_user_ns
, opts
->fs_uid
));
4421 if (!gid_eq(opts
->fs_gid
, GLOBAL_ROOT_GID
))
4422 seq_printf(m
, ",gid=%u",
4423 from_kgid_munged(&init_user_ns
, opts
->fs_gid
));
4424 seq_printf(m
, ",fmask=%04o", opts
->fs_fmask
);
4425 seq_printf(m
, ",dmask=%04o", opts
->fs_dmask
);
4426 if (opts
->allow_utime
)
4427 seq_printf(m
, ",allow_utime=%04o", opts
->allow_utime
);
4429 seq_printf(m
, ",codepage=%s", sbi
->nls_disk
->charset
);
4431 seq_printf(m
, ",iocharset=%s", sbi
->nls_io
->charset
);
4433 seq_puts(m
, ",utf8");
4434 if (sbi
->fsi
.vol_type
!= EXFAT
)
4435 seq_puts(m
, ",shortname=winnt");
4436 seq_printf(m
, ",namecase=%u", opts
->casesensitive
);
4438 seq_puts(m
, ",tz=UTC");
4439 if (opts
->improved_allocation
& SDFAT_ALLOC_DELAY
)
4440 seq_puts(m
, ",delay");
4441 if (opts
->improved_allocation
& SDFAT_ALLOC_SMART
)
4442 seq_printf(m
, ",smart,ausize=%u", opts
->amap_opt
.sect_per_au
);
4444 seq_puts(m
, ",defrag");
4445 if (opts
->adj_hidsect
)
4446 seq_puts(m
, ",adj_hid");
4448 seq_puts(m
, ",adj_req");
4449 seq_printf(m
, ",symlink=%u", opts
->symlink
);
4450 seq_printf(m
, ",bps=%ld", sb
->s_blocksize
);
4451 if (opts
->errors
== SDFAT_ERRORS_CONT
)
4452 seq_puts(m
, ",errors=continue");
4453 else if (opts
->errors
== SDFAT_ERRORS_PANIC
)
4454 seq_puts(m
, ",errors=panic");
4456 seq_puts(m
, ",errors=remount-ro");
4458 seq_puts(m
, ",discard");
4463 static const struct super_operations sdfat_sops
= {
4464 .alloc_inode
= sdfat_alloc_inode
,
4465 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
4466 .free_inode
= sdfat_free_inode
,
4468 .destroy_inode
= sdfat_destroy_inode
,
4470 .write_inode
= sdfat_write_inode
,
4471 .evict_inode
= sdfat_evict_inode
,
4472 .put_super
= sdfat_put_super
,
4473 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
4474 .write_super
= sdfat_write_super
,
4476 .sync_fs
= sdfat_sync_fs
,
4477 .statfs
= sdfat_statfs
,
4478 .remount_fs
= sdfat_remount
,
4479 .show_options
= sdfat_show_options
,
4482 /*======================================================================*/
4483 /* SYSFS Operations */
4484 /*======================================================================*/
4485 #define SDFAT_ATTR(name, mode, show, store) \
4486 static struct sdfat_attr sdfat_attr_##name = __ATTR(name, mode, show, store)
4489 struct attribute attr
;
4490 ssize_t (*show
)(struct sdfat_sb_info
*, char *);
4491 ssize_t (*store
)(struct sdfat_sb_info
*, const char *, size_t);
4494 static ssize_t
sdfat_attr_show(struct kobject
*kobj
, struct attribute
*attr
, char *buf
)
4496 struct sdfat_sb_info
*sbi
= container_of(kobj
, struct sdfat_sb_info
, sb_kobj
);
4497 struct sdfat_attr
*a
= container_of(attr
, struct sdfat_attr
, attr
);
4499 return a
->show
? a
->show(sbi
, buf
) : 0;
4502 static ssize_t
sdfat_attr_store(struct kobject
*kobj
, struct attribute
*attr
,
4503 const char *buf
, size_t len
)
4505 struct sdfat_sb_info
*sbi
= container_of(kobj
, struct sdfat_sb_info
, sb_kobj
);
4506 struct sdfat_attr
*a
= container_of(attr
, struct sdfat_attr
, attr
);
4508 return a
->store
? a
->store(sbi
, buf
, len
) : len
;
4511 static const struct sysfs_ops sdfat_attr_ops
= {
4512 .show
= sdfat_attr_show
,
4513 .store
= sdfat_attr_store
,
4517 static ssize_t
type_show(struct sdfat_sb_info
*sbi
, char *buf
)
4519 FS_INFO_T
*fsi
= &(sbi
->fsi
);
4521 return snprintf(buf
, PAGE_SIZE
, "%s\n", sdfat_get_vol_type_str(fsi
->vol_type
));
4523 SDFAT_ATTR(type
, 0444, type_show
, NULL
);
4525 static ssize_t
eio_show(struct sdfat_sb_info
*sbi
, char *buf
)
4527 FS_INFO_T
*fsi
= &(sbi
->fsi
);
4529 return snprintf(buf
, PAGE_SIZE
, "0x%x\n", fsi
->prev_eio
);
4531 SDFAT_ATTR(eio
, 0444, eio_show
, NULL
);
4533 static ssize_t
fratio_show(struct sdfat_sb_info
*sbi
, char *buf
)
4535 unsigned int n_total_au
= 0;
4536 unsigned int n_clean_au
= 0;
4537 unsigned int n_full_au
= 0;
4538 unsigned int n_dirty_au
= 0;
4539 unsigned int fr
= 0;
4541 n_total_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_TOTAL
);
4542 n_clean_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_CLEAN
);
4543 n_full_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_FULL
);
4544 n_dirty_au
= n_total_au
- (n_full_au
+ n_clean_au
);
4548 else if (!n_clean_au
)
4551 fr
= (n_dirty_au
* 100) / (n_clean_au
+ n_dirty_au
);
4553 return snprintf(buf
, PAGE_SIZE
, "%u\n", fr
);
4555 SDFAT_ATTR(fratio
, 0444, fratio_show
, NULL
);
4557 static ssize_t
totalau_show(struct sdfat_sb_info
*sbi
, char *buf
)
4559 unsigned int n_au
= 0;
4561 n_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_TOTAL
);
4562 return snprintf(buf
, PAGE_SIZE
, "%u\n", n_au
);
4564 SDFAT_ATTR(totalau
, 0444, totalau_show
, NULL
);
4566 static ssize_t
cleanau_show(struct sdfat_sb_info
*sbi
, char *buf
)
4568 unsigned int n_clean_au
= 0;
4570 n_clean_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_CLEAN
);
4571 return snprintf(buf
, PAGE_SIZE
, "%u\n", n_clean_au
);
4573 SDFAT_ATTR(cleanau
, 0444, cleanau_show
, NULL
);
4575 static ssize_t
fullau_show(struct sdfat_sb_info
*sbi
, char *buf
)
4577 unsigned int n_full_au
= 0;
4579 n_full_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_FULL
);
4580 return snprintf(buf
, PAGE_SIZE
, "%u\n", n_full_au
);
4582 SDFAT_ATTR(fullau
, 0444, fullau_show
, NULL
);
4584 static struct attribute
*sdfat_attrs
[] = {
4585 &sdfat_attr_type
.attr
,
4586 &sdfat_attr_eio
.attr
,
4587 &sdfat_attr_fratio
.attr
,
4588 &sdfat_attr_totalau
.attr
,
4589 &sdfat_attr_cleanau
.attr
,
4590 &sdfat_attr_fullau
.attr
,
4594 static struct kobj_type sdfat_ktype
= {
4595 .default_attrs
= sdfat_attrs
,
4596 .sysfs_ops
= &sdfat_attr_ops
,
4599 static ssize_t
version_show(struct kobject
*kobj
,
4600 struct kobj_attribute
*attr
, char *buff
)
4602 return snprintf(buff
, PAGE_SIZE
, "FS Version %s\n", SDFAT_VERSION
);
4605 static struct kobj_attribute version_attr
= __ATTR_RO(version
);
4607 static struct attribute
*attributes
[] = {
4612 static struct attribute_group attr_group
= {
4613 .attrs
= attributes
,
4616 /*======================================================================*/
4617 /* Super Block Read Operations */
4618 /*======================================================================*/
4649 static const match_table_t sdfat_tokens
= {
4650 {Opt_uid
, "uid=%u"},
4651 {Opt_gid
, "gid=%u"},
4652 {Opt_umask
, "umask=%o"},
4653 {Opt_dmask
, "dmask=%o"},
4654 {Opt_fmask
, "fmask=%o"},
4655 {Opt_allow_utime
, "allow_utime=%o"},
4656 {Opt_codepage
, "codepage=%u"},
4657 {Opt_charset
, "iocharset=%s"},
4659 {Opt_namecase
, "namecase=%u"},
4660 {Opt_tz_utc
, "tz=UTC"},
4661 {Opt_adj_hidsect
, "adj_hid"},
4662 {Opt_delay
, "delay"},
4663 {Opt_smart
, "smart"},
4664 {Opt_ausize
, "ausize=%u"},
4665 {Opt_packing
, "packing=%u"},
4666 {Opt_defrag
, "defrag"},
4667 {Opt_symlink
, "symlink=%u"},
4668 {Opt_debug
, "debug"},
4669 {Opt_err_cont
, "errors=continue"},
4670 {Opt_err_panic
, "errors=panic"},
4671 {Opt_err_ro
, "errors=remount-ro"},
4672 {Opt_discard
, "discard"},
4674 {Opt_adj_req
, "adj_req"},
4678 static int parse_options(struct super_block
*sb
, char *options
, int silent
,
4679 int *debug
, struct sdfat_mount_options
*opts
)
4682 substring_t args
[MAX_OPT_ARGS
];
4686 opts
->fs_uid
= current_uid();
4687 opts
->fs_gid
= current_gid();
4688 opts
->fs_fmask
= opts
->fs_dmask
= current
->fs
->umask
;
4689 opts
->allow_utime
= (unsigned short) -1;
4690 opts
->codepage
= sdfat_default_codepage
;
4691 opts
->iocharset
= sdfat_default_iocharset
;
4692 opts
->casesensitive
= 0;
4694 opts
->adj_hidsect
= 0;
4696 opts
->improved_allocation
= 0;
4697 opts
->amap_opt
.pack_ratio
= 0; // Default packing
4698 opts
->amap_opt
.sect_per_au
= 0;
4699 opts
->amap_opt
.misaligned_sect
= 0;
4701 opts
->errors
= SDFAT_ERRORS_RO
;
4708 while ((p
= strsep(&options
, ",")) != NULL
) {
4713 token
= match_token(p
, sdfat_tokens
, args
);
4716 if (match_int(&args
[0], &option
))
4718 opts
->fs_uid
= make_kuid(current_user_ns(), option
);
4721 if (match_int(&args
[0], &option
))
4723 opts
->fs_gid
= make_kgid(current_user_ns(), option
);
4728 if (match_octal(&args
[0], &option
))
4730 if (token
!= Opt_dmask
)
4731 opts
->fs_fmask
= option
;
4732 if (token
!= Opt_fmask
)
4733 opts
->fs_dmask
= option
;
4735 case Opt_allow_utime
:
4736 if (match_octal(&args
[0], &option
))
4738 opts
->allow_utime
= option
& (S_IWGRP
| S_IWOTH
);
4741 if (match_int(&args
[0], &option
))
4743 opts
->codepage
= option
;
4746 if (opts
->iocharset
!= sdfat_default_iocharset
)
4747 kfree(opts
->iocharset
);
4748 tmpstr
= match_strdup(&args
[0]);
4751 opts
->iocharset
= tmpstr
;
4754 if (match_int(&args
[0], &option
))
4756 opts
->casesensitive
= (option
> 0) ? 1:0;
4761 case Opt_adj_hidsect
:
4762 opts
->adj_hidsect
= 1;
4768 if (match_int(&args
[0], &option
))
4770 opts
->symlink
= option
> 0 ? 1 : 0;
4773 opts
->improved_allocation
|= SDFAT_ALLOC_DELAY
;
4776 opts
->improved_allocation
|= SDFAT_ALLOC_SMART
;
4779 if (match_int(&args
[0], &option
))
4781 if (!is_power_of_2(option
))
4783 opts
->amap_opt
.sect_per_au
= option
;
4784 IMSG("set AU size by option : %u sectors\n", option
);
4787 if (match_int(&args
[0], &option
))
4789 opts
->amap_opt
.pack_ratio
= option
;
4792 #ifdef CONFIG_SDFAT_DFR
4795 IMSG("defragmentation config is not enabled. ignore\n");
4799 opts
->errors
= SDFAT_ERRORS_CONT
;
4802 opts
->errors
= SDFAT_ERRORS_PANIC
;
4805 opts
->errors
= SDFAT_ERRORS_RO
;
4814 tmpstr
= match_strdup(&args
[0]);
4817 for (i
= 0; i
< FS_TYPE_MAX
; i
++) {
4818 if (!strcmp(tmpstr
, FS_TYPE_STR
[i
])) {
4819 opts
->fs_type
= (unsigned char)i
;
4820 sdfat_log_msg(sb
, KERN_ERR
,
4821 "set fs-type by option : %s",
4827 if (i
== FS_TYPE_MAX
) {
4828 sdfat_log_msg(sb
, KERN_ERR
,
4830 "only allow auto, exfat, vfat");
4835 #ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
4838 IMSG("adjust request config is not enabled. ignore\n");
4843 sdfat_msg(sb
, KERN_ERR
,
4844 "unrecognized mount option \"%s\" "
4845 "or missing value", p
);
4852 if (opts
->allow_utime
== (unsigned short) -1)
4853 opts
->allow_utime
= ~opts
->fs_dmask
& (S_IWGRP
| S_IWOTH
);
4855 if (opts
->utf8
&& strcmp(opts
->iocharset
, sdfat_iocharset_with_utf8
)) {
4856 sdfat_msg(sb
, KERN_WARNING
,
4857 "utf8 enabled, \"iocharset=%s\" is recommended",
4858 sdfat_iocharset_with_utf8
);
4861 if (opts
->discard
) {
4862 struct request_queue
*q
= bdev_get_queue(sb
->s_bdev
);
4864 if (!blk_queue_discard(q
))
4865 sdfat_msg(sb
, KERN_WARNING
,
4866 "mounting with \"discard\" option, but "
4867 "the device does not support discard");
4874 static void sdfat_hash_init(struct super_block
*sb
)
4876 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4879 spin_lock_init(&sbi
->inode_hash_lock
);
4880 for (i
= 0; i
< SDFAT_HASH_SIZE
; i
++)
4881 INIT_HLIST_HEAD(&sbi
->inode_hashtable
[i
]);
4884 static int sdfat_read_root(struct inode
*inode
)
4886 struct super_block
*sb
= inode
->i_sb
;
4887 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4888 sdfat_timespec_t ts
;
4889 FS_INFO_T
*fsi
= &(sbi
->fsi
);
4892 ts
= current_time(inode
);
4894 SDFAT_I(inode
)->fid
.dir
.dir
= fsi
->root_dir
;
4895 SDFAT_I(inode
)->fid
.dir
.flags
= 0x01;
4896 SDFAT_I(inode
)->fid
.entry
= -1;
4897 SDFAT_I(inode
)->fid
.start_clu
= fsi
->root_dir
;
4898 SDFAT_I(inode
)->fid
.flags
= 0x01;
4899 SDFAT_I(inode
)->fid
.type
= TYPE_DIR
;
4900 SDFAT_I(inode
)->fid
.version
= 0;
4901 SDFAT_I(inode
)->fid
.rwoffset
= 0;
4902 SDFAT_I(inode
)->fid
.hint_bmap
.off
= CLUS_EOF
;
4903 SDFAT_I(inode
)->fid
.hint_stat
.eidx
= 0;
4904 SDFAT_I(inode
)->fid
.hint_stat
.clu
= fsi
->root_dir
;
4905 SDFAT_I(inode
)->fid
.hint_femp
.eidx
= -1;
4907 SDFAT_I(inode
)->target
= NULL
;
4909 if (fsapi_read_inode(inode
, &info
) < 0)
4912 inode
->i_uid
= sbi
->options
.fs_uid
;
4913 inode
->i_gid
= sbi
->options
.fs_gid
;
4914 inode_inc_iversion(inode
);
4915 inode
->i_generation
= 0;
4916 inode
->i_mode
= sdfat_make_mode(sbi
, ATTR_SUBDIR
, S_IRWXUGO
);
4917 inode
->i_op
= &sdfat_dir_inode_operations
;
4918 inode
->i_fop
= &sdfat_dir_operations
;
4920 i_size_write(inode
, info
.Size
);
4921 SDFAT_I(inode
)->fid
.size
= info
.Size
;
4922 inode
->i_blocks
= ((i_size_read(inode
) + (fsi
->cluster_size
- 1))
4923 & ~((loff_t
)fsi
->cluster_size
- 1)) >> inode
->i_blkbits
;
4924 SDFAT_I(inode
)->i_pos
= ((loff_t
) fsi
->root_dir
<< 32) | 0xffffffff;
4925 SDFAT_I(inode
)->i_size_aligned
= i_size_read(inode
);
4926 SDFAT_I(inode
)->i_size_ondisk
= i_size_read(inode
);
4928 sdfat_save_attr(inode
, ATTR_SUBDIR
);
4929 inode
->i_mtime
= inode
->i_atime
= inode
->i_ctime
= ts
;
4930 set_nlink(inode
, info
.NumSubdirs
+ 2);
4936 static void setup_dops(struct super_block
*sb
)
4938 if (SDFAT_SB(sb
)->options
.casesensitive
== 0)
4939 sb
->s_d_op
= &sdfat_ci_dentry_ops
;
4941 sb
->s_d_op
= &sdfat_dentry_ops
;
4944 static int sdfat_fill_super(struct super_block
*sb
, void *data
, int silent
)
4946 struct inode
*root_inode
= NULL
;
4947 struct sdfat_sb_info
*sbi
;
4951 struct block_device
*bdev
= sb
->s_bdev
;
4952 dev_t bd_dev
= bdev
? bdev
->bd_dev
: 0;
4954 sdfat_log_msg(sb
, KERN_INFO
, "trying to mount...");
4957 * GFP_KERNEL is ok here, because while we do hold the
4958 * supeblock lock, memory pressure can't call back into
4959 * the filesystem, since we're only just about to mount
4960 * it and have no inodes etc active!
4962 sbi
= kzalloc(sizeof(struct sdfat_sb_info
), GFP_KERNEL
);
4964 sdfat_log_msg(sb
, KERN_INFO
,
4965 "trying to alloc sbi with vzalloc()");
4966 sbi
= vzalloc(sizeof(struct sdfat_sb_info
));
4968 sdfat_log_msg(sb
, KERN_ERR
, "failed to mount! (ENOMEM)");
4971 sbi
->use_vmalloc
= 1;
4974 mutex_init(&sbi
->s_vlock
);
4975 sb
->s_fs_info
= sbi
;
4976 sb
->s_flags
|= SB_NODIRATIME
;
4977 sb
->s_magic
= SDFAT_SUPER_MAGIC
;
4978 sb
->s_op
= &sdfat_sops
;
4979 ratelimit_state_init(&sbi
->ratelimit
, DEFAULT_RATELIMIT_INTERVAL
,
4980 DEFAULT_RATELIMIT_BURST
);
4982 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
4983 sb
->s_time_gran
= NSEC_PER_SEC
; /* the same with default */
4984 sb
->s_time_min
= SDFAT_MIN_TIMESTAMP_SECS
;
4985 sb
->s_time_max
= SDFAT_MAX_TIMESTAMP_SECS
;
4988 err
= parse_options(sb
, data
, silent
, &debug
, &sbi
->options
);
4990 sdfat_log_msg(sb
, KERN_ERR
, "failed to parse options");
4994 setup_sdfat_xattr_handler(sb
);
4995 setup_sdfat_sync_super_wq(sb
);
4998 err
= fsapi_mount(sb
);
5000 sdfat_log_msg(sb
, KERN_ERR
, "failed to recognize fat type");
5004 /* set up enough so that it can read an inode */
5005 sdfat_hash_init(sb
);
5008 * The low byte of FAT's first entry must have same value with
5009 * media-field. But in real world, too many devices is
5010 * writing wrong value. So, removed that validity check.
5012 * if (FAT_FIRST_ENT(sb, media) != first)
5016 sprintf(buf
, "cp%d", sbi
->options
.codepage
);
5017 sbi
->nls_disk
= load_nls(buf
);
5018 if (!sbi
->nls_disk
) {
5019 sdfat_log_msg(sb
, KERN_ERR
, "codepage %s not found", buf
);
5023 sbi
->nls_io
= load_nls(sbi
->options
.iocharset
);
5025 sdfat_log_msg(sb
, KERN_ERR
, "IO charset %s not found",
5026 sbi
->options
.iocharset
);
5030 err
= __alloc_dfr_mem_if_required(sb
);
5032 sdfat_log_msg(sb
, KERN_ERR
, "failed to initialize a memory for "
5038 root_inode
= new_inode(sb
);
5040 sdfat_log_msg(sb
, KERN_ERR
, "failed to allocate root inode.");
5044 root_inode
->i_ino
= SDFAT_ROOT_INO
;
5045 inode_set_iversion(root_inode
, 1);
5047 err
= sdfat_read_root(root_inode
);
5049 sdfat_log_msg(sb
, KERN_ERR
, "failed to initialize root inode.");
5053 sdfat_attach(root_inode
, SDFAT_I(root_inode
)->i_pos
);
5054 insert_inode_hash(root_inode
);
5057 sb
->s_root
= __d_make_root(root_inode
);
5059 sdfat_msg(sb
, KERN_ERR
, "failed to get the root dentry");
5064 * Initialize filesystem attributes (for sysfs)
5065 * ex: /sys/fs/sdfat/mmcblk1[179:17]
5067 sbi
->sb_kobj
.kset
= sdfat_kset
;
5068 err
= kobject_init_and_add(&sbi
->sb_kobj
, &sdfat_ktype
, NULL
,
5069 "%s[%d:%d]", sb
->s_id
, MAJOR(bd_dev
), MINOR(bd_dev
));
5071 sdfat_msg(sb
, KERN_ERR
, "Unable to create sdfat attributes for"
5072 " %s[%d:%d](%d)", sb
->s_id
,
5073 MAJOR(bd_dev
), MINOR(bd_dev
), err
);
5077 sdfat_log_msg(sb
, KERN_INFO
, "mounted successfully!");
5079 sdfat_statistics_set_mnt(&sbi
->fsi
);
5080 sdfat_statistics_set_vol_size(sb
);
5084 __free_dfr_mem_if_required(sb
);
5088 sdfat_log_msg(sb
, KERN_INFO
, "failed to mount! (%d)", err
);
5095 unload_nls(sbi
->nls_io
);
5097 unload_nls(sbi
->nls_disk
);
5098 if (sbi
->options
.iocharset
!= sdfat_default_iocharset
)
5099 kfree(sbi
->options
.iocharset
);
5100 sb
->s_fs_info
= NULL
;
5101 if (!sbi
->use_vmalloc
)
5108 static struct dentry
*sdfat_fs_mount(struct file_system_type
*fs_type
,
5109 int flags
, const char *dev_name
, void *data
) {
5110 return mount_bdev(fs_type
, flags
, dev_name
, data
, sdfat_fill_super
);
5113 static void init_once(void *foo
)
5115 struct sdfat_inode_info
*ei
= (struct sdfat_inode_info
*)foo
;
5117 INIT_HLIST_NODE(&ei
->i_hash_fat
);
5118 inode_init_once(&ei
->vfs_inode
);
5121 static int __init
sdfat_init_inodecache(void)
5123 sdfat_inode_cachep
= kmem_cache_create("sdfat_inode_cache",
5124 sizeof(struct sdfat_inode_info
),
5125 0, (SLAB_RECLAIM_ACCOUNT
|SLAB_MEM_SPREAD
),
5127 if (!sdfat_inode_cachep
)
5132 static void sdfat_destroy_inodecache(void)
5134 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
5136 * Make sure all delayed rcu free inodes are flushed before we
5141 kmem_cache_destroy(sdfat_inode_cachep
);
#ifdef CONFIG_SDFAT_DBG_IOCTL
/*
 * Debug variant of ->kill_sb.
 *
 * When the SDFAT_DEBUGFLAGS_INVALID_UMOUNT debug flag is set it drops the
 * fsapi cache and every page cached for the backing block device —
 * including dirty ones — to simulate sudden device removal, then performs
 * the normal superblock teardown.
 */
static void sdfat_debug_kill_sb(struct super_block *sb)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	struct block_device *bdev = sb->s_bdev;
	long flags;

	/* s_fs_info is NULL when fill_super failed; guard before reading it. */
	if (sbi) {
		flags = sbi->debug_flags;

		if (flags & SDFAT_DEBUGFLAGS_INVALID_UMOUNT) {
			/* invalidate_bdev drops all device cache include dirty.
			 * we use this to simulate device removal
			 */
			fsapi_cache_release(sb);
			invalidate_bdev(bdev);
		}
	}

	kill_block_super(sb);
}
#endif /* CONFIG_SDFAT_DBG_IOCTL */
5168 static struct file_system_type sdfat_fs_type
= {
5169 .owner
= THIS_MODULE
,
5171 .mount
= sdfat_fs_mount
,
5172 #ifdef CONFIG_SDFAT_DBG_IOCTL
5173 .kill_sb
= sdfat_debug_kill_sb
,
5175 .kill_sb
= kill_block_super
,
5176 #endif /* CONFIG_SDFAT_DBG_IOCTL */
5177 .fs_flags
= FS_REQUIRES_DEV
,
5180 static int __init
init_sdfat_fs(void)
5184 sdfat_log_version();
5189 sdfat_kset
= kset_create_and_add("sdfat", NULL
, fs_kobj
);
5191 pr_err("[SDFAT] failed to create sdfat kset\n");
5196 err
= sysfs_create_group(&sdfat_kset
->kobj
, &attr_group
);
5198 pr_err("[SDFAT] failed to create sdfat version attributes\n");
5202 err
= sdfat_statistics_init(sdfat_kset
);
5206 err
= sdfat_uevent_init(sdfat_kset
);
5210 err
= sdfat_init_inodecache();
5212 pr_err("[SDFAT] failed to initialize inode cache\n");
5216 err
= register_filesystem(&sdfat_fs_type
);
5218 pr_err("[SDFAT] failed to register filesystem\n");
5224 sdfat_uevent_uninit();
5225 sdfat_statistics_uninit();
5228 sysfs_remove_group(&sdfat_kset
->kobj
, &attr_group
);
5229 kset_unregister(sdfat_kset
);
5233 sdfat_destroy_inodecache();
5236 pr_err("[SDFAT] failed to initialize FS driver(err:%d)\n", err
);
5240 static void __exit
exit_sdfat_fs(void)
5242 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
5244 * Make sure all delayed rcu free inodes are flushed before we
5249 sdfat_uevent_uninit();
5250 sdfat_statistics_uninit();
5253 sysfs_remove_group(&sdfat_kset
->kobj
, &attr_group
);
5254 kset_unregister(sdfat_kset
);
5258 sdfat_destroy_inodecache();
5259 unregister_filesystem(&sdfat_fs_type
);
/* Register the module's entry and exit points with the kernel. */
module_init(init_sdfat_fs);
module_exit(exit_sdfat_fs);

/* Module metadata exposed via modinfo. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FAT/exFAT filesystem support");
MODULE_AUTHOR("Samsung Electronics Co., Ltd.");