2 * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 /************************************************************************/
20 /* PROJECT : exFAT & FAT12/16/32 File System */
22 /* PURPOSE : sdFAT glue layer for supporting VFS */
24 /*----------------------------------------------------------------------*/
28 /************************************************************************/
30 #include <linux/version.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/time.h>
34 #include <linux/slab.h>
35 #include <linux/seq_file.h>
36 #include <linux/pagemap.h>
37 #include <linux/mpage.h>
38 #include <linux/buffer_head.h>
39 #include <linux/exportfs.h>
40 #include <linux/mount.h>
41 #include <linux/vfs.h>
42 #include <linux/parser.h>
43 #include <linux/uio.h>
44 #include <linux/writeback.h>
45 #include <linux/log2.h>
46 #include <linux/hash.h>
47 #include <linux/backing-dev.h>
48 #include <linux/sched.h>
49 #include <linux/fs_struct.h>
50 #include <linux/namei.h>
51 #include <linux/bio.h>
52 #include <linux/blkdev.h>
53 #include <linux/swap.h> /* for mark_page_accessed() */
54 #include <linux/vmalloc.h>
55 #include <asm/current.h>
56 #include <asm/unaligned.h>
57 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
58 #include <linux/aio.h>
61 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
62 #error SDFAT only supports linux kernel version 3.0 or higher
68 /* skip iterating emit_dots when dir is empty */
69 #define ITER_POS_FILLED_DOTS (2)
71 /* type index declare at sdfat.h */
72 const char *FS_TYPE_STR
[] = {
78 static struct kset
*sdfat_kset
;
79 static struct kmem_cache
*sdfat_inode_cachep
;
81 static int sdfat_default_codepage
= CONFIG_SDFAT_DEFAULT_CODEPAGE
;
82 static char sdfat_default_iocharset
[] = CONFIG_SDFAT_DEFAULT_IOCHARSET
;
83 static const char sdfat_iocharset_with_utf8
[] = "iso8859-1";
85 #ifdef CONFIG_SDFAT_TRACE_SB_LOCK
86 static unsigned long __lock_jiffies
;
89 static void sdfat_truncate(struct inode
*inode
, loff_t old_size
);
90 static int sdfat_get_block(struct inode
*inode
, sector_t iblock
,
91 struct buffer_head
*bh_result
, int create
);
93 static struct inode
*sdfat_iget(struct super_block
*sb
, loff_t i_pos
);
94 static struct inode
*sdfat_build_inode(struct super_block
*sb
, const FILE_ID_T
*fid
, loff_t i_pos
);
95 static void sdfat_detach(struct inode
*inode
);
96 static void sdfat_attach(struct inode
*inode
, loff_t i_pos
);
97 static inline unsigned long sdfat_hash(loff_t i_pos
);
98 static int __sdfat_write_inode(struct inode
*inode
, int sync
);
99 static int sdfat_sync_inode(struct inode
*inode
);
100 static int sdfat_write_inode(struct inode
*inode
, struct writeback_control
*wbc
);
101 static void sdfat_write_super(struct super_block
*sb
);
102 static void sdfat_write_failed(struct address_space
*mapping
, loff_t to
);
104 static void sdfat_init_namebuf(DENTRY_NAMEBUF_T
*nb
);
105 static int sdfat_alloc_namebuf(DENTRY_NAMEBUF_T
*nb
);
106 static void sdfat_free_namebuf(DENTRY_NAMEBUF_T
*nb
);
108 /*************************************************************************
109 * INNER FUNCTIONS FOR FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
110 *************************************************************************/
111 static void __sdfat_writepage_end_io(struct bio
*bio
, int err
);
112 static inline void __lock_super(struct super_block
*sb
);
113 static inline void __unlock_super(struct super_block
*sb
);
114 static int __sdfat_create(struct inode
*dir
, struct dentry
*dentry
);
115 static int __sdfat_revalidate(struct dentry
*dentry
);
116 static int __sdfat_revalidate_ci(struct dentry
*dentry
, unsigned int flags
);
117 static int __sdfat_file_fsync(struct file
*filp
, loff_t start
, loff_t end
, int datasync
);
118 static struct dentry
*__sdfat_lookup(struct inode
*dir
, struct dentry
*dentry
);
119 static int __sdfat_mkdir(struct inode
*dir
, struct dentry
*dentry
);
120 static int __sdfat_rename(struct inode
*old_dir
, struct dentry
*old_dentry
,
121 struct inode
*new_dir
, struct dentry
*new_dentry
);
122 static int __sdfat_show_options(struct seq_file
*m
, struct super_block
*sb
);
123 static inline ssize_t
__sdfat_blkdev_direct_IO(int rw
, struct kiocb
*iocb
,
124 struct inode
*inode
, void *iov_u
, loff_t offset
,
125 unsigned long nr_segs
);
126 static inline ssize_t
__sdfat_direct_IO(int rw
, struct kiocb
*iocb
,
127 struct inode
*inode
, void *iov_u
, loff_t offset
,
128 loff_t count
, unsigned long nr_segs
);
129 static int __sdfat_d_hash(const struct dentry
*dentry
, struct qstr
*qstr
);
130 static int __sdfat_d_hashi(const struct dentry
*dentry
, struct qstr
*qstr
);
131 static int __sdfat_cmp(const struct dentry
*dentry
, unsigned int len
,
132 const char *str
, const struct qstr
*name
);
133 static int __sdfat_cmpi(const struct dentry
*dentry
, unsigned int len
,
134 const char *str
, const struct qstr
*name
);
136 /*************************************************************************
137 * FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
138 *************************************************************************/
139 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
140 static int sdfat_rename(struct inode
*old_dir
, struct dentry
*old_dentry
,
141 struct inode
*new_dir
, struct dentry
*new_dentry
,
145 * The VFS already checks for existence, so for local filesystems
146 * the RENAME_NOREPLACE implementation is equivalent to plain rename.
147 * Don't support any other flags
149 if (flags
& ~RENAME_NOREPLACE
)
151 return __sdfat_rename(old_dir
, old_dentry
, new_dir
, new_dentry
);
153 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) */
154 static int sdfat_rename(struct inode
*old_dir
, struct dentry
*old_dentry
,
155 struct inode
*new_dir
, struct dentry
*new_dentry
)
157 return __sdfat_rename(old_dir
, old_dentry
, new_dir
, new_dentry
);
160 static int setattr_prepare(struct dentry
*dentry
, struct iattr
*attr
)
162 struct inode
*inode
= dentry
->d_inode
;
164 return inode_change_ok(inode
, attr
);
169 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
170 static inline void __sdfat_submit_bio_write(struct bio
*bio
)
172 bio_set_op_attrs(bio
, REQ_OP_WRITE
, 0);
/*
 * >= 4.8: full_name_hash() takes a salt pointer as its first argument;
 * pass the dentry through as the salt.
 */
static inline unsigned int __sdfat_full_name_hash(const struct dentry *dentry,
						  const char *name,
						  unsigned int len)
{
	return full_name_hash(dentry, name, len);
}
/* >= 4.8: init_name_hash() is salted with the parent dentry. */
static inline unsigned long __sdfat_init_name_hash(const struct dentry *dentry)
{
	return init_name_hash(dentry);
}
185 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) */
186 static inline void __sdfat_submit_bio_write(struct bio
*bio
)
188 submit_bio(WRITE
, bio
);
/* < 4.8: full_name_hash() is unsalted; the dentry argument is ignored. */
static inline unsigned int __sdfat_full_name_hash(const struct dentry *unused,
						  const char *name,
						  unsigned int len)
{
	return full_name_hash(name, len);
}
/* < 4.8: init_name_hash() takes no salt; the dentry argument is ignored. */
static inline unsigned long __sdfat_init_name_hash(const struct dentry *unused)
{
	return init_name_hash();
}
202 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 21)
204 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 21) */
205 static inline void inode_lock(struct inode
*inode
)
207 mutex_lock(&inode
->i_mutex
);
210 static inline void inode_unlock(struct inode
*inode
)
212 mutex_unlock(&inode
->i_mutex
);
216 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
217 static void sdfat_writepage_end_io(struct bio
*bio
)
219 __sdfat_writepage_end_io(bio
, bio
->bi_error
);
221 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) */
222 static void sdfat_writepage_end_io(struct bio
*bio
, int err
)
224 if (test_bit(BIO_UPTODATE
, &bio
->bi_flags
))
226 __sdfat_writepage_end_io(bio
, err
);
231 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
232 static inline int sdfat_remount_syncfs(struct super_block
*sb
)
237 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) */
238 static inline int sdfat_remount_syncfs(struct super_block
*sb
)
241 * We don`t need to call sync_filesystem(sb),
242 * Because VFS calls it.
249 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
250 static inline sector_t
__sdfat_bio_sector(struct bio
*bio
)
252 return bio
->bi_iter
.bi_sector
;
255 static inline void __sdfat_set_bio_iterate(struct bio
*bio
, sector_t sector
,
256 unsigned int size
, unsigned int idx
, unsigned int done
)
258 struct bvec_iter
*iter
= &(bio
->bi_iter
);
260 iter
->bi_sector
= sector
;
261 iter
->bi_size
= size
;
263 iter
->bi_bvec_done
= done
;
266 static void __sdfat_truncate_pagecache(struct inode
*inode
,
267 loff_t to
, loff_t newsize
)
269 truncate_pagecache(inode
, newsize
);
/* >= 3.14: d_hash() takes only the dentry and the qstr to hash. */
static int sdfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
{
	return __sdfat_d_hash(dentry, qstr);
}
/* >= 3.14: case-insensitive d_hash() variant, same reduced signature. */
static int sdfat_d_hashi(const struct dentry *dentry, struct qstr *qstr)
{
	return __sdfat_d_hashi(dentry, qstr);
}
282 //instead of sdfat_readdir
283 static int sdfat_iterate(struct file
*filp
, struct dir_context
*ctx
)
285 struct inode
*inode
= filp
->f_path
.dentry
->d_inode
;
286 struct super_block
*sb
= inode
->i_sb
;
287 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
288 FS_INFO_T
*fsi
= &(sbi
->fsi
);
290 DENTRY_NAMEBUF_T
*nb
= &(de
.NameBuf
);
293 int err
= 0, fake_offset
= 0;
295 sdfat_init_namebuf(nb
);
299 if ((fsi
->vol_type
== EXFAT
) || (inode
->i_ino
== SDFAT_ROOT_INO
)) {
300 if (!dir_emit_dots(filp
, ctx
))
302 if (ctx
->pos
== ITER_POS_FILLED_DOTS
) {
307 if (cpos
& (DENTRY_SIZE
- 1)) {
312 /* name buffer should be allocated before use */
313 err
= sdfat_alloc_namebuf(nb
);
317 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
318 SDFAT_I(inode
)->fid
.rwoffset
= cpos
>> DENTRY_SIZE_BITS
;
320 if (cpos
>= SDFAT_I(inode
)->fid
.size
)
323 err
= fsapi_readdir(inode
, &de
);
325 // at least we tried to read a sector
326 // move cpos to next sector position (should be aligned)
328 cpos
+= 1 << (sb
->s_blocksize_bits
);
329 cpos
&= ~((u32
)sb
->s_blocksize
-1);
336 cpos
= SDFAT_I(inode
)->fid
.rwoffset
<< DENTRY_SIZE_BITS
;
341 if (!memcmp(nb
->sfn
, DOS_CUR_DIR_NAME
, DOS_NAME_LENGTH
)) {
343 } else if (!memcmp(nb
->sfn
, DOS_PAR_DIR_NAME
, DOS_NAME_LENGTH
)) {
344 inum
= parent_ino(filp
->f_path
.dentry
);
346 loff_t i_pos
= ((loff_t
) SDFAT_I(inode
)->fid
.start_clu
<< 32) |
347 ((SDFAT_I(inode
)->fid
.rwoffset
-1) & 0xffffffff);
348 struct inode
*tmp
= sdfat_iget(sb
, i_pos
);
354 inum
= iunique(sb
, SDFAT_ROOT_INO
);
358 /* Before calling dir_emit(), sb_lock should be released.
359 * Because page fault can occur in dir_emit() when the size of buffer given
360 * from user is larger than one page size
363 if (!dir_emit(ctx
, nb
->lfn
, strlen(nb
->lfn
), inum
,
364 (de
.Attr
& ATTR_SUBDIR
) ? DT_DIR
: DT_REG
))
372 if (!cpos
&& fake_offset
)
373 cpos
= ITER_POS_FILLED_DOTS
;
379 * To improve performance, free namebuf after unlock sb_lock.
380 * If namebuf is not allocated, this function do nothing
382 sdfat_free_namebuf(nb
);
385 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
386 static inline sector_t
__sdfat_bio_sector(struct bio
*bio
)
388 return bio
->bi_sector
;
391 static inline void __sdfat_set_bio_iterate(struct bio
*bio
, sector_t sector
,
392 unsigned int size
, unsigned int idx
, unsigned int done
)
394 bio
->bi_sector
= sector
;
396 bio
->bi_size
= size
; //PAGE_SIZE;
399 static void __sdfat_truncate_pagecache(struct inode
*inode
,
400 loff_t to
, loff_t newsize
)
402 truncate_pagecache(inode
, to
, newsize
);
405 static int sdfat_d_hash(const struct dentry
*dentry
,
406 const struct inode
*inode
, struct qstr
*qstr
)
408 return __sdfat_d_hash(dentry
, qstr
);
411 static int sdfat_d_hashi(const struct dentry
*dentry
,
412 const struct inode
*inode
, struct qstr
*qstr
)
414 return __sdfat_d_hashi(dentry
, qstr
);
417 static int sdfat_readdir(struct file
*filp
, void *dirent
, filldir_t filldir
)
419 struct inode
*inode
= filp
->f_path
.dentry
->d_inode
;
420 struct super_block
*sb
= inode
->i_sb
;
421 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
422 FS_INFO_T
*fsi
= &(sbi
->fsi
);
424 DENTRY_NAMEBUF_T
*nb
= &(de
.NameBuf
);
427 int err
= 0, fake_offset
= 0;
429 sdfat_init_namebuf(nb
);
433 /* Fake . and .. for the root directory. */
434 if ((fsi
->vol_type
== EXFAT
) || (inode
->i_ino
== SDFAT_ROOT_INO
)) {
435 while (cpos
< ITER_POS_FILLED_DOTS
) {
436 if (inode
->i_ino
== SDFAT_ROOT_INO
)
437 inum
= SDFAT_ROOT_INO
;
440 else /* (cpos == 1) */
441 inum
= parent_ino(filp
->f_path
.dentry
);
443 if (filldir(dirent
, "..", cpos
+1, cpos
, inum
, DT_DIR
) < 0)
448 if (cpos
== ITER_POS_FILLED_DOTS
) {
453 if (cpos
& (DENTRY_SIZE
- 1)) {
458 /* name buffer should be allocated before use */
459 err
= sdfat_alloc_namebuf(nb
);
463 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
464 SDFAT_I(inode
)->fid
.rwoffset
= cpos
>> DENTRY_SIZE_BITS
;
466 if (cpos
>= SDFAT_I(inode
)->fid
.size
)
469 err
= fsapi_readdir(inode
, &de
);
471 // at least we tried to read a sector
472 // move cpos to next sector position (should be aligned)
474 cpos
+= 1 << (sb
->s_blocksize_bits
);
475 cpos
&= ~((u32
)sb
->s_blocksize
-1);
482 cpos
= SDFAT_I(inode
)->fid
.rwoffset
<< DENTRY_SIZE_BITS
;
487 if (!memcmp(nb
->sfn
, DOS_CUR_DIR_NAME
, DOS_NAME_LENGTH
)) {
489 } else if (!memcmp(nb
->sfn
, DOS_PAR_DIR_NAME
, DOS_NAME_LENGTH
)) {
490 inum
= parent_ino(filp
->f_path
.dentry
);
492 loff_t i_pos
= ((loff_t
) SDFAT_I(inode
)->fid
.start_clu
<< 32) |
493 ((SDFAT_I(inode
)->fid
.rwoffset
-1) & 0xffffffff);
494 struct inode
*tmp
= sdfat_iget(sb
, i_pos
);
500 inum
= iunique(sb
, SDFAT_ROOT_INO
);
504 /* Before calling dir_emit(), sb_lock should be released.
505 * Because page fault can occur in dir_emit() when the size of buffer given
506 * from user is larger than one page size
509 if (filldir(dirent
, nb
->lfn
, strlen(nb
->lfn
), cpos
, inum
,
510 (de
.Attr
& ATTR_SUBDIR
) ? DT_DIR
: DT_REG
) < 0)
518 if (!cpos
&& fake_offset
)
519 cpos
= ITER_POS_FILLED_DOTS
;
525 * To improve performance, free namebuf after unlock sb_lock.
526 * If namebuf is not allocated, this function do nothing
528 sdfat_free_namebuf(nb
);
534 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
536 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) */
537 static inline struct inode
*file_inode(const struct file
*f
)
539 return f
->f_dentry
->d_inode
;
544 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
545 static inline int __is_sb_dirty(struct super_block
*sb
)
547 return SDFAT_SB(sb
)->s_dirt
;
550 static inline void __set_sb_clean(struct super_block
*sb
)
552 SDFAT_SB(sb
)->s_dirt
= 0;
555 /* Workqueue wrapper for sdfat_write_super () */
556 static void __write_super_delayed(struct work_struct
*work
)
558 struct sdfat_sb_info
*sbi
;
559 struct super_block
*sb
;
561 sbi
= container_of(work
, struct sdfat_sb_info
, write_super_work
.work
);
564 /* XXX: Is this needed? */
565 if (!sb
|| !down_read_trylock(&sb
->s_umount
)) {
566 DMSG("%s: skip delayed work(write_super).\n", __func__
);
570 DMSG("%s: do delayed_work(write_super).\n", __func__
);
572 spin_lock(&sbi
->work_lock
);
573 sbi
->write_super_queued
= 0;
574 spin_unlock(&sbi
->work_lock
);
576 sdfat_write_super(sb
);
578 up_read(&sb
->s_umount
);
581 static void setup_sdfat_sync_super_wq(struct super_block
*sb
)
583 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
585 mutex_init(&sbi
->s_lock
);
586 spin_lock_init(&sbi
->work_lock
);
587 INIT_DELAYED_WORK(&sbi
->write_super_work
, __write_super_delayed
);
591 static inline bool __cancel_delayed_work_sync(struct sdfat_sb_info
*sbi
)
593 return cancel_delayed_work_sync(&sbi
->write_super_work
);
596 static inline void lock_super(struct super_block
*sb
)
598 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
600 mutex_lock(&sbi
->s_lock
);
603 static inline void unlock_super(struct super_block
*sb
)
605 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
607 mutex_unlock(&sbi
->s_lock
);
610 static int sdfat_revalidate(struct dentry
*dentry
, unsigned int flags
)
612 if (flags
& LOOKUP_RCU
)
615 return __sdfat_revalidate(dentry
);
618 static int sdfat_revalidate_ci(struct dentry
*dentry
, unsigned int flags
)
620 if (flags
& LOOKUP_RCU
)
623 return __sdfat_revalidate_ci(dentry
, flags
);
626 static struct inode
*sdfat_iget(struct super_block
*sb
, loff_t i_pos
)
628 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
629 struct sdfat_inode_info
*info
;
630 struct hlist_head
*head
= sbi
->inode_hashtable
+ sdfat_hash(i_pos
);
631 struct inode
*inode
= NULL
;
633 spin_lock(&sbi
->inode_hash_lock
);
634 hlist_for_each_entry(info
, head
, i_hash_fat
) {
635 BUG_ON(info
->vfs_inode
.i_sb
!= sb
);
637 if (i_pos
!= info
->i_pos
)
639 inode
= igrab(&info
->vfs_inode
);
643 spin_unlock(&sbi
->inode_hash_lock
);
646 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0) */
647 static inline int __is_sb_dirty(struct super_block
*sb
)
652 static inline void __set_sb_clean(struct super_block
*sb
)
657 static void setup_sdfat_sync_super_wq(struct super_block
*sb
)
659 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
664 static inline bool __cancel_delayed_work_sync(struct sdfat_sb_info
*sbi
)
670 static inline void clear_inode(struct inode
*inode
)
672 end_writeback(inode
);
675 static int sdfat_revalidate(struct dentry
*dentry
, struct nameidata
*nd
)
677 if (nd
&& nd
->flags
& LOOKUP_RCU
)
680 return __sdfat_revalidate(dentry
);
683 static int sdfat_revalidate_ci(struct dentry
*dentry
, struct nameidata
*nd
)
685 if (nd
&& nd
->flags
& LOOKUP_RCU
)
688 return __sdfat_revalidate_ci(dentry
, nd
? nd
->flags
: 0);
692 static struct inode
*sdfat_iget(struct super_block
*sb
, loff_t i_pos
)
694 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
695 struct sdfat_inode_info
*info
;
696 struct hlist_node
*node
;
697 struct hlist_head
*head
= sbi
->inode_hashtable
+ sdfat_hash(i_pos
);
698 struct inode
*inode
= NULL
;
700 spin_lock(&sbi
->inode_hash_lock
);
701 hlist_for_each_entry(info
, node
, head
, i_hash_fat
) {
702 BUG_ON(info
->vfs_inode
.i_sb
!= sb
);
704 if (i_pos
!= info
->i_pos
)
706 inode
= igrab(&info
->vfs_inode
);
710 spin_unlock(&sbi
->inode_hash_lock
);
716 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
717 static struct dentry
*sdfat_lookup(struct inode
*dir
, struct dentry
*dentry
,
720 return __sdfat_lookup(dir
, dentry
);
722 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) */
723 static struct dentry
*sdfat_lookup(struct inode
*dir
, struct dentry
*dentry
,
724 struct nameidata
*nd
)
726 return __sdfat_lookup(dir
, dentry
);
731 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
733 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) */
734 #define GLOBAL_ROOT_UID (0)
735 #define GLOBAL_ROOT_GID (0)
737 static inline bool uid_eq(uid_t left
, uid_t right
)
739 return left
== right
;
742 static inline bool gid_eq(gid_t left
, gid_t right
)
744 return left
== right
;
747 static inline uid_t
from_kuid_munged(struct user_namespace
*to
, uid_t kuid
)
752 static inline gid_t
from_kgid_munged(struct user_namespace
*to
, gid_t kgid
)
757 static inline uid_t
make_kuid(struct user_namespace
*from
, uid_t uid
)
762 static inline gid_t
make_kgid(struct user_namespace
*from
, gid_t gid
)
769 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
/* >= 3.4: build the root dentry with d_make_root(). */
static struct dentry *__d_make_root(struct inode *root_inode)
{
	return d_make_root(root_inode);
}
775 static void __sdfat_do_truncate(struct inode
*inode
, loff_t old
, loff_t
new)
777 down_write(&SDFAT_I(inode
)->truncate_lock
);
778 truncate_setsize(inode
, new);
779 sdfat_truncate(inode
, old
);
780 up_write(&SDFAT_I(inode
)->truncate_lock
);
783 static sector_t
sdfat_aop_bmap(struct address_space
*mapping
, sector_t block
)
787 /* sdfat_get_cluster() assumes the requested blocknr isn't truncated. */
788 down_read(&SDFAT_I(mapping
->host
)->truncate_lock
);
789 blocknr
= generic_block_bmap(mapping
, block
, sdfat_get_block
);
790 up_read(&SDFAT_I(mapping
->host
)->truncate_lock
);
794 static int sdfat_mkdir(struct inode
*dir
, struct dentry
*dentry
, umode_t mode
)
796 return __sdfat_mkdir(dir
, dentry
);
799 static int sdfat_show_options(struct seq_file
*m
, struct dentry
*root
)
801 return __sdfat_show_options(m
, root
->d_sb
);
803 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) */
804 static inline void set_nlink(struct inode
*inode
, unsigned int nlink
)
806 inode
->i_nlink
= nlink
;
/* < 3.4: d_make_root() did not exist; fall back to d_alloc_root(). */
static struct dentry *__d_make_root(struct inode *root_inode)
{
	return d_alloc_root(root_inode);
}
814 static void __sdfat_do_truncate(struct inode
*inode
, loff_t old
, loff_t
new)
816 truncate_setsize(inode
, new);
817 sdfat_truncate(inode
, old
);
820 static sector_t
sdfat_aop_bmap(struct address_space
*mapping
, sector_t block
)
824 /* sdfat_get_cluster() assumes the requested blocknr isn't truncated. */
825 down_read(&mapping
->host
->i_alloc_sem
);
826 blocknr
= generic_block_bmap(mapping
, block
, sdfat_get_block
);
827 up_read(&mapping
->host
->i_alloc_sem
);
831 static int sdfat_mkdir(struct inode
*dir
, struct dentry
*dentry
, int mode
)
833 return __sdfat_mkdir(dir
, dentry
);
836 static int sdfat_show_options(struct seq_file
*m
, struct vfsmount
*mnt
)
838 return __sdfat_show_options(m
, mnt
->mnt_sb
);
843 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
844 #define __sdfat_generic_file_fsync(filp, start, end, datasync) \
845 generic_file_fsync(filp, start, end, datasync)
847 static int sdfat_file_fsync(struct file
*filp
, loff_t start
, loff_t end
, int datasync
)
849 return __sdfat_file_fsync(filp
, start
, end
, datasync
);
851 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
852 #define __sdfat_generic_file_fsync(filp, start, end, datasync) \
853 generic_file_fsync(filp, datasync)
854 static int sdfat_file_fsync(struct file
*filp
, int datasync
)
856 return __sdfat_file_fsync(filp
, 0, 0, datasync
);
860 /*************************************************************************
861 * MORE FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
862 *************************************************************************/
863 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
864 static int sdfat_cmp(const struct dentry
*dentry
,
865 unsigned int len
, const char *str
, const struct qstr
*name
)
867 return __sdfat_cmp(dentry
, len
, str
, name
);
870 static int sdfat_cmpi(const struct dentry
*dentry
,
871 unsigned int len
, const char *str
, const struct qstr
*name
)
873 return __sdfat_cmpi(dentry
, len
, str
, name
);
875 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
876 static int sdfat_cmp(const struct dentry
*parent
, const struct dentry
*dentry
,
877 unsigned int len
, const char *str
, const struct qstr
*name
)
879 return __sdfat_cmp(dentry
, len
, str
, name
);
882 static int sdfat_cmpi(const struct dentry
*parent
, const struct dentry
*dentry
,
883 unsigned int len
, const char *str
, const struct qstr
*name
)
885 return __sdfat_cmpi(dentry
, len
, str
, name
);
888 static int sdfat_cmp(const struct dentry
*parent
, const struct inode
*pinode
,
889 const struct dentry
*dentry
, const struct inode
*inode
,
890 unsigned int len
, const char *str
, const struct qstr
*name
)
892 return __sdfat_cmp(dentry
, len
, str
, name
);
895 static int sdfat_cmpi(const struct dentry
*parent
, const struct inode
*pinode
,
896 const struct dentry
*dentry
, const struct inode
*inode
,
897 unsigned int len
, const char *str
, const struct qstr
*name
)
899 return __sdfat_cmpi(dentry
, len
, str
, name
);
903 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
904 static const char *sdfat_follow_link(struct dentry
*dentry
, struct inode
*inode
, struct delayed_call
*done
)
906 struct sdfat_inode_info
*ei
= SDFAT_I(inode
);
908 return (char *)(ei
->target
);
910 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
911 static const char *sdfat_follow_link(struct dentry
*dentry
, void **cookie
)
913 struct sdfat_inode_info
*ei
= SDFAT_I(dentry
->d_inode
);
915 return *cookie
= (char *)(ei
->target
);
918 static void *sdfat_follow_link(struct dentry
*dentry
, struct nameidata
*nd
)
920 struct sdfat_inode_info
*ei
= SDFAT_I(dentry
->d_inode
);
922 nd_set_link(nd
, (char *)(ei
->target
));
928 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
929 static ssize_t
sdfat_direct_IO(struct kiocb
*iocb
, struct iov_iter
*iter
)
931 struct file
*file
= iocb
->ki_filp
;
932 struct address_space
*mapping
= file
->f_mapping
;
933 struct inode
*inode
= mapping
->host
;
934 size_t count
= iov_iter_count(iter
);
935 int rw
= iov_iter_rw(iter
);
936 loff_t offset
= iocb
->ki_pos
;
938 return __sdfat_direct_IO(rw
, iocb
, inode
,
939 (void *)iter
, offset
, count
, 0 /* UNUSED */);
941 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
942 static ssize_t
sdfat_direct_IO(struct kiocb
*iocb
,
943 struct iov_iter
*iter
,
946 struct file
*file
= iocb
->ki_filp
;
947 struct address_space
*mapping
= file
->f_mapping
;
948 struct inode
*inode
= mapping
->host
;
949 size_t count
= iov_iter_count(iter
);
950 int rw
= iov_iter_rw(iter
);
952 return __sdfat_direct_IO(rw
, iocb
, inode
,
953 (void *)iter
, offset
, count
, 0 /* UNUSED */);
955 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
956 static ssize_t
sdfat_direct_IO(int rw
, struct kiocb
*iocb
,
957 struct iov_iter
*iter
,
960 struct file
*file
= iocb
->ki_filp
;
961 struct address_space
*mapping
= file
->f_mapping
;
962 struct inode
*inode
= mapping
->host
;
963 size_t count
= iov_iter_count(iter
);
965 return __sdfat_direct_IO(rw
, iocb
, inode
,
966 (void *)iter
, offset
, count
, 0 /* UNUSED */);
969 static ssize_t
sdfat_direct_IO(int rw
, struct kiocb
*iocb
,
970 const struct iovec
*iov
, loff_t offset
, unsigned long nr_segs
)
972 struct file
*file
= iocb
->ki_filp
;
973 struct address_space
*mapping
= file
->f_mapping
;
974 struct inode
*inode
= mapping
->host
;
975 size_t count
= iov_length(iov
, nr_segs
);
977 return __sdfat_direct_IO(rw
, iocb
, inode
,
978 (void *)iov
, offset
, count
, nr_segs
);
983 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
984 static inline ssize_t
__sdfat_blkdev_direct_IO(int unused
, struct kiocb
*iocb
,
985 struct inode
*inode
, void *iov_u
, loff_t unused_1
,
986 unsigned long nr_segs
)
988 struct iov_iter
*iter
= (struct iov_iter
*)iov_u
;
990 return blockdev_direct_IO(iocb
, inode
, iter
, sdfat_get_block
);
992 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
993 static inline ssize_t
__sdfat_blkdev_direct_IO(int unused
, struct kiocb
*iocb
,
994 struct inode
*inode
, void *iov_u
, loff_t offset
,
995 unsigned long nr_segs
)
997 struct iov_iter
*iter
= (struct iov_iter
*)iov_u
;
999 return blockdev_direct_IO(iocb
, inode
, iter
, offset
, sdfat_get_block
);
1001 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
1002 static inline ssize_t
__sdfat_blkdev_direct_IO(int rw
, struct kiocb
*iocb
,
1003 struct inode
*inode
, void *iov_u
, loff_t offset
,
1004 unsigned long nr_segs
)
1006 struct iov_iter
*iter
= (struct iov_iter
*)iov_u
;
1008 return blockdev_direct_IO(rw
, iocb
, inode
, iter
,
1009 offset
, sdfat_get_block
);
1011 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
1012 static inline ssize_t
__sdfat_blkdev_direct_IO(int rw
, struct kiocb
*iocb
,
1013 struct inode
*inode
, void *iov_u
, loff_t offset
,
1014 unsigned long nr_segs
)
1016 const struct iovec
*iov
= (const struct iovec
*)iov_u
;
1018 return blockdev_direct_IO(rw
, iocb
, inode
, iov
,
1019 offset
, nr_segs
, sdfat_get_block
);
1022 static inline ssize_t
__sdfat_blkdev_direct_IO(int rw
, struct kiocb
*iocb
,
1023 struct inode
*inode
, void *iov_u
, loff_t offset
,
1024 unsigned long nr_segs
)
1026 const struct iovec
*iov
= (const struct iovec
*)iov_u
;
1028 return blockdev_direct_IO(rw
, iocb
, inode
, inode
->i_sb
->s_bdev
, iov
,
1029 offset
, nr_segs
, sdfat_get_block
, NULL
);
1034 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
1035 static int sdfat_create(struct inode
*dir
, struct dentry
*dentry
, umode_t mode
,
1038 return __sdfat_create(dir
, dentry
);
1040 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
1041 static int sdfat_create(struct inode
*dir
, struct dentry
*dentry
, umode_t mode
,
1042 struct nameidata
*nd
)
1044 return __sdfat_create(dir
, dentry
);
1047 static int sdfat_create(struct inode
*dir
, struct dentry
*dentry
, int mode
,
1048 struct nameidata
*nd
)
1050 return __sdfat_create(dir
, dentry
);
1055 /*************************************************************************
1056 * WRAP FUNCTIONS FOR DEBUGGING
1057 *************************************************************************/
1058 #ifdef CONFIG_SDFAT_TRACE_SB_LOCK
1059 static inline void __lock_super(struct super_block
*sb
)
1062 __lock_jiffies
= jiffies
;
1065 static inline void __unlock_super(struct super_block
*sb
)
1067 int time
= ((jiffies
- __lock_jiffies
) * 1000 / HZ
);
1068 /* FIXME : error message should be modified */
1070 EMSG("lock_super in %s (%d ms)\n", __func__
, time
);
1074 #else /* CONFIG_SDFAT_TRACE_SB_LOCK */
1075 static inline void __lock_super(struct super_block
*sb
)
1080 static inline void __unlock_super(struct super_block
*sb
)
1084 #endif /* CONFIG_SDFAT_TRACE_SB_LOCK */
1086 /*************************************************************************
1088 *************************************************************************/
1089 static inline loff_t
sdfat_make_i_pos(FILE_ID_T
*fid
)
1091 return ((loff_t
) fid
->dir
.dir
<< 32) | (fid
->entry
& 0xffffffff);
1094 /*======================================================================*/
1095 /* Directory Entry Name Buffer Operations */
1096 /*======================================================================*/
1097 static void sdfat_init_namebuf(DENTRY_NAMEBUF_T
*nb
)
1105 static int sdfat_alloc_namebuf(DENTRY_NAMEBUF_T
*nb
)
1107 nb
->lfn
= __getname();
1110 nb
->sfn
= nb
->lfn
+ MAX_VFSNAME_BUF_SIZE
;
1111 nb
->lfnbuf_len
= MAX_VFSNAME_BUF_SIZE
;
1112 nb
->sfnbuf_len
= MAX_VFSNAME_BUF_SIZE
;
1116 static void sdfat_free_namebuf(DENTRY_NAMEBUF_T
*nb
)
1122 sdfat_init_namebuf(nb
);
1125 /*======================================================================*/
1126 /* Directory Entry Operations */
1127 /*======================================================================*/
1128 #define SDFAT_DSTATE_LOCKED (void *)(0xCAFE2016)
1129 #define SDFAT_DSTATE_UNLOCKED (void *)(0x00000000)
1131 static inline void __lock_d_revalidate(struct dentry
*dentry
)
1133 spin_lock(&dentry
->d_lock
);
1134 dentry
->d_fsdata
= SDFAT_DSTATE_LOCKED
;
1135 spin_unlock(&dentry
->d_lock
);
1138 static inline void __unlock_d_revalidate(struct dentry
*dentry
)
1140 spin_lock(&dentry
->d_lock
);
1141 dentry
->d_fsdata
= SDFAT_DSTATE_UNLOCKED
;
1142 spin_unlock(&dentry
->d_lock
);
1145 /* __check_dstate_locked requires dentry->d_lock */
1146 static inline int __check_dstate_locked(struct dentry
*dentry
)
1148 if (dentry
->d_fsdata
== SDFAT_DSTATE_LOCKED
)
1155 * If new entry was created in the parent, it could create the 8.3
1156 * alias (the shortname of logname). So, the parent may have the
1157 * negative-dentry which matches the created 8.3 alias.
1159 * If it happened, the negative dentry isn't actually negative
1160 * anymore. So, drop it.
1162 static int __sdfat_revalidate_common(struct dentry
*dentry
)
1166 spin_lock(&dentry
->d_lock
);
1167 if ((!dentry
->d_inode
) && (!__check_dstate_locked(dentry
) &&
1168 (dentry
->d_time
!= dentry
->d_parent
->d_inode
->i_version
))) {
1171 spin_unlock(&dentry
->d_lock
);
/* Case-sensitive d_revalidate helper: positive dentries are always valid;
 * negative ones are checked against the parent's version.
 */
static int __sdfat_revalidate(struct dentry *dentry)
{
	/* This is not negative dentry. Always valid. */
	if (dentry->d_inode)
		return 1;

	return __sdfat_revalidate_common(dentry);
}
/* Case-insensitive d_revalidate helper. */
static int __sdfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
{
	/*
	 * This is not negative dentry. Always valid.
	 *
	 * Note, rename() to existing directory entry will have ->d_inode,
	 * and will use existing name which isn't specified name by user.
	 *
	 * We may be able to drop this positive dentry here. But dropping
	 * positive dentry isn't good idea. So it's unsupported like
	 * rename("filename", "FILENAME") for now.
	 */
	if (dentry->d_inode)
		return 1;
#if 0	/* Blocked below code for lookup_one_len() called by stackable FS */
	/*
	 * This may be nfsd (or something), anyway, we can't see the
	 * intent of this. So, since this can be for creation, drop it.
	 */
	if (!flags)
		return 0;
#endif
	/*
	 * Drop the negative dentry, in order to make sure to use the
	 * case sensitive name which is specified by user if this is
	 * for creation.
	 */
	if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
		return 0;

	return __sdfat_revalidate_common(dentry);
}
1216 /* returns the length of a struct qstr, ignoring trailing dots */
/* Return `len` with any run of trailing '.' characters ignored.
 * FAT treats "name." the same as "name", so comparisons and hashes use
 * this stripped length.
 */
static unsigned int __sdfat_striptail_len(unsigned int len, const char *name)
{
	unsigned int n = len;

	while (n) {
		if (name[n - 1] != '.')
			break;
		n--;
	}

	return n;
}
/* qstr wrapper for __sdfat_striptail_len(): length of the name with
 * trailing dots ignored.
 */
static unsigned int sdfat_striptail_len(const struct qstr *qstr)
{
	return __sdfat_striptail_len(qstr->len, qstr->name);
}
1230 * Compute the hash for the sdfat name corresponding to the dentry.
1231 * Note: if the name is invalid, we leave the hash code unchanged so
1232 * that the existing dentry can be used. The sdfat fs routines will
1233 * return ENOENT or EINVAL as appropriate.
/*
 * Compute the hash for the sdfat name corresponding to the dentry.
 * Note: if the name is invalid, we leave the hash code unchanged so
 * that the existing dentry can be used. The sdfat fs routines will
 * return ENOENT or EINVAL as appropriate.
 */
static int __sdfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
{
	/* Hash over the dot-stripped length so "name" and "name." collide. */
	unsigned int len = sdfat_striptail_len(qstr);

	qstr->hash = __sdfat_full_name_hash(dentry, qstr->name, len);
	return 0;
}
1244 * Compute the hash for the sdfat name corresponding to the dentry.
1245 * Note: if the name is invalid, we leave the hash code unchanged so
1246 * that the existing dentry can be used. The sdfat fs routines will
1247 * return ENOENT or EINVAL as appropriate.
/*
 * Compute the case-insensitive hash for the sdfat name corresponding to
 * the dentry.  Characters are folded with the superblock's NLS io table
 * so that names differing only in case hash identically.
 * Note: if the name is invalid, we leave the hash code unchanged so
 * that the existing dentry can be used. The sdfat fs routines will
 * return ENOENT or EINVAL as appropriate.
 */
static int __sdfat_d_hashi(const struct dentry *dentry, struct qstr *qstr)
{
	struct nls_table *t = SDFAT_SB(dentry->d_sb)->nls_io;
	const unsigned char *name;
	unsigned int len;
	unsigned long hash;

	name = qstr->name;
	/* Trailing dots are ignored, as in the case-sensitive hash. */
	len = sdfat_striptail_len(qstr);

	hash = __sdfat_init_name_hash(dentry);
	while (len--)
		hash = partial_name_hash(nls_tolower(t, *name++), hash);
	qstr->hash = end_name_hash(hash);

	return 0;
}
1268 * Case sensitive compare of two sdfat names.
1270 static int __sdfat_cmp(const struct dentry
*dentry
, unsigned int len
,
1271 const char *str
, const struct qstr
*name
)
1273 unsigned int alen
, blen
;
1275 /* A filename cannot end in '.' or we treat it like it has none */
1276 alen
= sdfat_striptail_len(name
);
1277 blen
= __sdfat_striptail_len(len
, str
);
1279 if (strncmp(name
->name
, str
, alen
) == 0)
1286 * Case insensitive compare of two sdfat names.
1288 static int __sdfat_cmpi(const struct dentry
*dentry
, unsigned int len
,
1289 const char *str
, const struct qstr
*name
)
1291 struct nls_table
*t
= SDFAT_SB(dentry
->d_sb
)->nls_io
;
1292 unsigned int alen
, blen
;
1294 /* A filename cannot end in '.' or we treat it like it has none */
1295 alen
= sdfat_striptail_len(name
);
1296 blen
= __sdfat_striptail_len(len
, str
);
1298 if (nls_strnicmp(t
, name
->name
, str
, alen
) == 0)
/* Dentry operations for case-sensitive mounts. */
static const struct dentry_operations sdfat_dentry_ops = {
	.d_revalidate	= sdfat_revalidate,
	.d_hash		= sdfat_d_hash,
	.d_compare	= sdfat_cmp,
};
/* Dentry operations for case-insensitive mounts (NLS-folded hash/compare). */
static const struct dentry_operations sdfat_ci_dentry_ops = {
	.d_revalidate	= sdfat_revalidate_ci,
	.d_hash		= sdfat_d_hashi,
	.d_compare	= sdfat_cmpi,
};
1316 #ifdef CONFIG_SDFAT_DFR
1317 /*----------------------------------------------------------------------*/
1318 /* Defragmentation related */
1319 /*----------------------------------------------------------------------*/
1321 * @fn defrag_cleanup_reqs
1322 * @brief clean-up defrag info depending on error flag
1324 * @param sb super block
1325 * @param error error flag
/**
 * @fn		defrag_cleanup_reqs
 * @brief	clean-up defrag info depending on error flag
 * @return	void
 * @param	sb		super block
 * @param	error	error flag
 */
static void defrag_cleanup_reqs(INOUT struct super_block *sb, IN int error)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	struct defrag_info *sb_dfr = &(sbi->dfr_info);
	struct defrag_info *ino_dfr = NULL, *tmp = NULL;
	/* sdfat patch 0.96 : sbi->dfr_info crash problem */
	__lock_super(sb);

	/* Clean-up ino_dfr */
	if (!error) {
		list_for_each_entry_safe(ino_dfr, tmp, &sb_dfr->entry, entry) {
			/* NOTE(review): inode reference pairing with the grab done
			 * in defrag_validate_reqs — confirm against upstream. */
			struct inode *inode = &(container_of(ino_dfr, struct sdfat_inode_info, dfr_info)->vfs_inode);

			mutex_lock(&ino_dfr->lock);

			atomic_set(&ino_dfr->stat, DFR_INO_STAT_IDLE);

			list_del(&ino_dfr->entry);

			ino_dfr->chunks = NULL;
			ino_dfr->nr_chunks = 0;
			INIT_LIST_HEAD(&ino_dfr->entry);

			BUG_ON(!mutex_is_locked(&ino_dfr->lock));
			mutex_unlock(&ino_dfr->lock);

			iput(inode);
		}
	}

	/* Clean-up sb_dfr */
	sb_dfr->chunks = NULL;
	sb_dfr->nr_chunks = 0;
	INIT_LIST_HEAD(&sb_dfr->entry);

	/* Clear dfr_new_clus page */
	memset(sbi->dfr_new_clus, 0, PAGE_SIZE);
	sbi->dfr_new_idx = 1;
	memset(sbi->dfr_page_wb, 0, PAGE_SIZE);

	/* Drop allocation hints and the DA reservation counter. */
	sbi->dfr_hint_clus = sbi->dfr_hint_idx = sbi->dfr_reserved_clus = 0;

	__unlock_super(sb);
}
1373 * @fn defrag_validate_pages
1374 * @brief validate and mark dirty for victiim pages
1375 * @return 0 on success, -errno otherwise
1376 * @param inode inode
1377 * @param chunk given chunk
1378 * @remark protected by inode_lock and super_lock
1381 defrag_validate_pages(
1382 IN
struct inode
*inode
,
1383 IN
struct defrag_chunk_info
*chunk
)
1385 struct super_block
*sb
= inode
->i_sb
;
1386 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
1387 struct page
*page
= NULL
;
1388 unsigned int i_size
= 0, page_off
= 0, page_nr
= 0;
1389 int buf_i
= 0, i
= 0, err
= 0;
1391 i_size
= i_size_read(inode
);
1392 page_off
= chunk
->f_clus
* PAGES_PER_CLUS(sb
);
1393 page_nr
= (i_size
/ PAGE_SIZE
) + ((i_size
% PAGE_SIZE
) ? 1 : 0);
1394 if ((i_size
<= 0) || (page_nr
<= 0)) {
1395 dfr_err("inode %p, i_size %d, page_nr %d", inode
, i_size
, page_nr
);
1400 * and check its dirty/writeback/mapped state
1403 i
< min((int)(page_nr
- page_off
), (int)(chunk
->nr_clus
* PAGES_PER_CLUS(sb
)));
1405 page
= find_get_page(inode
->i_mapping
, page_off
+ i
);
1407 if (!trylock_page(page
)) {
1413 dfr_debug("get/lock_page() failed, index %d", i
);
1418 sbi
->dfr_pagep
[buf_i
++] = page
;
1419 if (PageError(page
) || !PageUptodate(page
) || PageDirty(page
) ||
1420 PageWriteback(page
) || page_mapped(page
)) {
1421 dfr_debug("page %p, err %d, uptodate %d, "
1422 "dirty %d, wb %d, mapped %d",
1423 page
, PageError(page
), PageUptodate(page
),
1424 PageDirty(page
), PageWriteback(page
),
1430 set_bit((page
->index
& (PAGES_PER_CLUS(sb
) - 1)),
1431 (volatile unsigned long *)&(sbi
->dfr_page_wb
[chunk
->new_idx
+ i
/ PAGES_PER_CLUS(sb
)]));
1437 * All pages in the chunks are valid.
1439 i_size
-= (chunk
->f_clus
* (sbi
->fsi
.cluster_size
));
1440 BUG_ON(((i_size
/ PAGE_SIZE
) + ((i_size
% PAGE_SIZE
) ? 1 : 0)) != (page_nr
- page_off
));
1442 for (i
= 0; i
< buf_i
; i
++) {
1443 struct buffer_head
*bh
= NULL
, *head
= NULL
;
1446 page
= sbi
->dfr_pagep
[i
];
1449 /* Mark dirty in page */
1450 set_page_dirty(page
);
1451 mark_page_accessed(page
);
1453 /* Attach empty BHs */
1454 if (!page_has_buffers(page
))
1455 create_empty_buffers(page
, 1 << inode
->i_blkbits
, 0);
1457 /* Mark dirty in BHs */
1458 bh
= head
= page_buffers(page
);
1459 BUG_ON(!bh
&& !i_size
);
1461 if ((bh_idx
>= 1) && (bh_idx
>= (i_size
>> inode
->i_blkbits
))) {
1462 clear_buffer_dirty(bh
);
1464 if (PageUptodate(page
))
1465 if (!buffer_uptodate(bh
))
1466 set_buffer_uptodate(bh
);
1468 /* Set this bh as delay */
1470 set_buffer_delay(bh
);
1472 mark_buffer_dirty(bh
);
1476 bh
= bh
->b_this_page
;
1477 } while (bh
!= head
);
1479 /* Mark this page accessed */
1480 mark_page_accessed(page
);
1482 i_size
-= PAGE_SIZE
;
1486 /* Unlock and put refs for pages */
1487 for (i
= 0; i
< buf_i
; i
++) {
1488 BUG_ON(!sbi
->dfr_pagep
[i
]);
1489 unlock_page(sbi
->dfr_pagep
[i
]);
1490 put_page(sbi
->dfr_pagep
[i
]);
1492 memset(sbi
->dfr_pagep
, 0, sizeof(PAGE_SIZE
));
1499 * @fn defrag_validate_reqs
1500 * @brief validate defrag requests
1501 * @return negative if all requests not valid, 0 otherwise
1502 * @param sb super block
1503 * @param chunks given chunks
/**
 * @fn		defrag_validate_reqs
 * @brief	validate defrag requests
 * @return	negative if all requests not valid, 0 otherwise
 * @param	sb		super block
 * @param	chunks	given chunks
 */
static int
defrag_validate_reqs(
	IN struct super_block *sb,
	INOUT struct defrag_chunk_info *chunks)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	struct defrag_info *sb_dfr = &(sbi->dfr_info);
	int i = 0, err = 0, err_cnt = 0;

	/* Validate all reqs */
	for (i = REQ_HEADER_IDX + 1; i < sb_dfr->nr_chunks; i++) {
		struct defrag_chunk_info *chunk = NULL;
		struct inode *inode = NULL;
		struct defrag_info *ino_dfr = NULL;

		chunk = &chunks[i];

		/* Check inode: a request for a vanished inode is marked ERR
		 * and skipped rather than failing the whole batch. */
		__lock_super(sb);
		inode = sdfat_iget(sb, chunk->i_pos);
		if (!inode) {
			dfr_debug("inode not found, i_pos %08llx", chunk->i_pos);
			chunk->stat = DFR_CHUNK_STAT_ERR;
			err_cnt++;
			__unlock_super(sb);
			continue;
		}
		__unlock_super(sb);

		dfr_debug("req[%d] inode %p, i_pos %08llx, f_clus %d, "
			"d_clus %08x, nr %d, prev %08x, next %08x",
			i, inode, chunk->i_pos, chunk->f_clus, chunk->d_clus,
			chunk->nr_clus, chunk->prev_clus, chunk->next_clus);
		/**
		 * Lock ordering: inode_lock -> lock_super
		 */
		inode_lock(inode);
		__lock_super(sb);

		/* Check if enough buffers exist for chunk->new_idx */
		if ((sbi->dfr_new_idx + chunk->nr_clus) >= (PAGE_SIZE / sizeof(int))) {
			dfr_err("dfr_new_idx %d, chunk->nr_clus %d",
					sbi->dfr_new_idx, chunk->nr_clus);
			err = -ENOSPC;
			goto unlock;
		}

		/* Reserve clusters for defrag with DA */
		err = fsapi_dfr_reserve_clus(sb, chunk->nr_clus);
		if (err)
			goto unlock;

		/* Check clusters */
		err = fsapi_dfr_validate_clus(inode, chunk, 0);
		if (err) {
			/* Undo the reservation made just above. */
			fsapi_dfr_reserve_clus(sb, 0 - chunk->nr_clus);
			dfr_debug("Cluster validation: err %d", err);
			goto unlock;
		}

		/* Check pages */
		err = defrag_validate_pages(inode, chunk);
		if (err) {
			fsapi_dfr_reserve_clus(sb, 0 - chunk->nr_clus);
			dfr_debug("Page validation: err %d", err);
			goto unlock;
		}

		/* Mark IGNORE flag to victim AU */
		if (sbi->options.improved_allocation & SDFAT_ALLOC_SMART)
			fsapi_dfr_mark_ignore(sb, chunk->d_clus);

		ino_dfr = &(SDFAT_I(inode)->dfr_info);
		mutex_lock(&ino_dfr->lock);

		/* Update chunk info */
		chunk->stat = DFR_CHUNK_STAT_REQ;
		chunk->new_idx = sbi->dfr_new_idx;

		/* Update ino_dfr info: first chunk for this inode links it
		 * into the sb-level list. */
		if (list_empty(&(ino_dfr->entry))) {
			list_add_tail(&ino_dfr->entry, &sb_dfr->entry);
			ino_dfr->chunks = chunk;
			igrab(inode);
		}
		ino_dfr->nr_chunks++;

		atomic_set(&ino_dfr->stat, DFR_INO_STAT_REQ);

		BUG_ON(!mutex_is_locked(&ino_dfr->lock));
		mutex_unlock(&ino_dfr->lock);

		/* Reserved buffers for chunk->new_idx */
		sbi->dfr_new_idx += chunk->nr_clus;

unlock:
		if (err) {
			chunk->stat = DFR_CHUNK_STAT_ERR;
			err_cnt++;
		}
		iput(inode);
		__unlock_super(sb);
		inode_unlock(inode);
	}

	/* Return error if all chunks are invalid */
	if (err_cnt == sb_dfr->nr_chunks - 1) {
		dfr_debug("%s failed (err_cnt %d)", __func__, err_cnt);
		return -ENXIO;
	}

	return 0;
}
1621 * @fn defrag_check_fs_busy
1622 * @brief check if this module busy
1623 * @return 0 when idle, 1 otherwise
1624 * @param sb super block
1625 * @param reserved_clus # of reserved clusters
1626 * @param queued_pages # of queued pages
/**
 * @fn		defrag_check_fs_busy
 * @brief	check if this module busy
 * @return	0 when idle, -EBUSY otherwise
 * @param	sb		super block
 * @param	reserved_clus	# of reserved clusters
 * @param	queued_pages	# of queued pages
 */
static int
defrag_check_fs_busy(
	IN struct super_block *sb,
	OUT int *reserved_clus,
	OUT int *queued_pages)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	int err = 0;

	*reserved_clus = *queued_pages = 0;

	__lock_super(sb);
	*reserved_clus = fsi->reserved_clusters;
	*queued_pages = atomic_read(&SDFAT_SB(sb)->stat_n_pages_queued);

	/* Any outstanding delayed-allocation reservation or queued write
	 * means defrag must not start now. */
	if (*reserved_clus || *queued_pages)
		err = -EBUSY;
	__unlock_super(sb);

	return err;
}
1652 * @fn sdfat_ioctl_defrag_req
1653 * @brief ioctl to send defrag requests
1654 * @return 0 on success, -errno otherwise
1655 * @param inode inode
1656 * @param uarg given requests
/**
 * @fn		sdfat_ioctl_defrag_req
 * @brief	ioctl to send defrag requests
 * @return	0 on success, -errno otherwise
 * @param	inode	inode
 * @param	uarg	given requests
 */
static int
sdfat_ioctl_defrag_req(
	IN struct inode *inode,
	INOUT unsigned int *uarg)
{
	struct super_block *sb = inode->i_sb;
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	struct defrag_info *sb_dfr = &(sbi->dfr_info);
	struct defrag_chunk_header head;
	struct defrag_chunk_info *chunks = NULL;
	unsigned int len = 0;
	int err = 0;
	unsigned long timeout = 0;

	/* Check overlapped defrag: only one request may be in flight;
	 * the cmpxchg on sb_dfr->stat is the admission gate. */
	if (atomic_cmpxchg(&sb_dfr->stat, DFR_SB_STAT_IDLE, DFR_SB_STAT_REQ)) {
		dfr_debug("sb_dfr->stat %d", atomic_read(&sb_dfr->stat));
		return -EBUSY;
	}

	/* Check if defrag required */
	__lock_super(sb);
	if (!fsapi_dfr_check_dfr_required(sb, NULL, NULL, NULL)) {
		dfr_debug("Not enough space left for defrag (err %d)", -ENOSPC);
		atomic_set(&sb_dfr->stat, DFR_SB_STAT_IDLE);
		__unlock_super(sb);
		return -ENOSPC;
	}
	__unlock_super(sb);

	/* Copy args */
	memset(&head, 0, sizeof(struct defrag_chunk_header));
	err = copy_from_user(&head, uarg, sizeof(struct defrag_chunk_info));
	ERR_HANDLE(err);

	/* If FS busy, cancel defrag */
	if (!(head.mode == DFR_MODE_TEST)) {
		int reserved_clus = 0, queued_pages = 0;

		err = defrag_check_fs_busy(sb, &reserved_clus, &queued_pages);
		if (err) {
			dfr_debug("FS busy, cancel defrag (reserved_clus %d, queued_pages %d)",
					reserved_clus, queued_pages);
			err = -EBUSY;
			goto error;
		}
	}

	/* Total length is saved in the chunk header's nr_chunks field */
	len = head.nr_chunks;
	ERR_HANDLE2(!len, err, -EINVAL);

	dfr_debug("IOC_DFR_REQ started (mode %d, nr_req %d)", head.mode, len - 1);
	if (get_order(len * sizeof(struct defrag_chunk_info)) > MAX_ORDER) {
		dfr_debug("len %u, sizeof(struct defrag_chunk_info) %lu, MAX_ORDER %d",
				len, sizeof(struct defrag_chunk_info), MAX_ORDER);
		err = -EINVAL;
		goto error;
	}
	chunks = alloc_pages_exact(len * sizeof(struct defrag_chunk_info),
				GFP_KERNEL | __GFP_ZERO);
	ERR_HANDLE2(!chunks, err, -ENOMEM);

	err = copy_from_user(chunks, uarg, len * sizeof(struct defrag_chunk_info));
	ERR_HANDLE(err);

	/* Initialize sb_dfr */
	sb_dfr->chunks = chunks;
	sb_dfr->nr_chunks = len;

	/* Validate reqs & mark defrag/dirty */
	err = defrag_validate_reqs(sb, sb_dfr->chunks);
	ERR_HANDLE(err);

	atomic_set(&sb_dfr->stat, DFR_SB_STAT_VALID);

	/* Wait for defrag completion */
	if (head.mode == DFR_MODE_ONESHOT)
		timeout = 0;
	else if (head.mode & DFR_MODE_BACKGROUND)
		timeout = DFR_DEFAULT_TIMEOUT;
	else
		timeout = DFR_MIN_TIMEOUT;

	dfr_debug("Wait for completion (timeout %ld)", timeout);
	init_completion(&sbi->dfr_complete);
	timeout = wait_for_completion_timeout(&sbi->dfr_complete, timeout);

	if (!timeout) {
		/* Force defrag_updat_fat() after timeout. */
		dfr_debug("Force sync(), mode %d, left-timeout %ld", head.mode, timeout);

		/* Protect against concurrent umount while forcing sync. */
		down_read(&sb->s_umount);

		sync_inodes_sb(sb);

		__lock_super(sb);
		fsapi_dfr_update_fat_next(sb);

		fsapi_sync_fs(sb, 1);

#ifdef CONFIG_SDFAT_DFR_DEBUG
		/* SPO test */
		fsapi_dfr_spo_test(sb, DFR_SPO_FAT_NEXT, __func__);
#endif

		fsapi_dfr_update_fat_prev(sb, 1);
		fsapi_sync_fs(sb, 1);

		__unlock_super(sb);

		up_read(&sb->s_umount);
	}

#ifdef CONFIG_SDFAT_DFR_DEBUG
	/* SPO test */
	fsapi_dfr_spo_test(sb, DFR_SPO_NORMAL, __func__);
#endif

	__lock_super(sb);
	/* Send DISCARD to clean-ed AUs */
	fsapi_dfr_check_discard(sb);

#ifdef CONFIG_SDFAT_DFR_DEBUG
	/* SPO test */
	fsapi_dfr_spo_test(sb, DFR_SPO_DISCARD, __func__);
#endif

	/* Unmark IGNORE flag to all victim AUs */
	fsapi_dfr_unmark_ignore_all(sb);
	__unlock_super(sb);

	/* Hand the per-chunk results back to user space. */
	err = copy_to_user(uarg, sb_dfr->chunks, sizeof(struct defrag_chunk_info) * len);

error:
	/* Clean-up sb_dfr & ino_dfr */
	defrag_cleanup_reqs(sb, err);

	if (chunks)
		free_pages_exact(chunks, len * sizeof(struct defrag_chunk_info));

	/* Set sb_dfr's state as IDLE */
	atomic_set(&sb_dfr->stat, DFR_SB_STAT_IDLE);

	dfr_debug("IOC_DFR_REQ done (err %d)", err);
	return err;
}
1808 * @fn sdfat_ioctl_defrag_trav
1809 * @brief ioctl to traverse given directory for defrag
1810 * @return 0 on success, -errno otherwise
1811 * @param inode inode
1812 * @param uarg output buffer
/**
 * @fn		sdfat_ioctl_defrag_trav
 * @brief	ioctl to traverse given directory for defrag
 * @return	0 on success, -errno otherwise
 * @param	inode	inode
 * @param	uarg	output buffer
 */
static int
sdfat_ioctl_defrag_trav(
	IN struct inode *inode,
	INOUT unsigned int *uarg)
{
	struct super_block *sb = inode->i_sb;
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	struct defrag_info *sb_dfr = &(sbi->dfr_info);
	/* dfr_pagep doubles as the traversal arg/result buffer (one page). */
	struct defrag_trav_arg *args = (struct defrag_trav_arg *) sbi->dfr_pagep;
	struct defrag_trav_header *header = (struct defrag_trav_header *) args;
	int err = 0;

	/* Check overlapped defrag */
	if (atomic_cmpxchg(&sb_dfr->stat, DFR_SB_STAT_IDLE, DFR_SB_STAT_REQ)) {
		dfr_debug("sb_dfr->stat %d", atomic_read(&sb_dfr->stat));
		return -EBUSY;
	}

	/* Check if defrag required */
	__lock_super(sb);
	if (!fsapi_dfr_check_dfr_required(sb, NULL, NULL, NULL)) {
		dfr_debug("Not enough space left for defrag (err %d)", -ENOSPC);
		atomic_set(&sb_dfr->stat, DFR_SB_STAT_IDLE);
		__unlock_super(sb);
		return -ENOSPC;
	}
	__unlock_super(sb);

	/* Copy args */
	err = copy_from_user(args, uarg, PAGE_SIZE);
	ERR_HANDLE(err);

	/**
	 * Check args.
	 * ROOT directory has i_pos = 0 and start_clus = 0 .
	 */
	if (!(header->type & DFR_TRAV_TYPE_HEADER)) {
		err = -EINVAL;
		dfr_debug("type %d, i_pos %08llx, start_clus %08x",
				header->type, header->i_pos, header->start_clus);
		goto error;
	}

	/* If FS busy, cancel defrag */
	if (!(header->type & DFR_TRAV_TYPE_TEST)) {
		unsigned int reserved_clus = 0, queued_pages = 0;

		err = defrag_check_fs_busy(sb, &reserved_clus, &queued_pages);
		if (err) {
			dfr_debug("FS busy, cancel defrag (reserved_clus %d, queued_pages %d)",
					reserved_clus, queued_pages);
			err = -EBUSY;
			goto error;
		}
	}

	/* Scan given directory and gather info */
	inode_lock(inode);
	__lock_super(sb);
	err = fsapi_dfr_scan_dir(sb, (void *)args);
	__unlock_super(sb);
	inode_unlock(inode);
	ERR_HANDLE(err);

	/* Copy the result to user */
	err = copy_to_user(uarg, args, PAGE_SIZE);
	ERR_HANDLE(err);

error:
	/* The scratch page is shared; always scrub it before going idle. */
	memset(sbi->dfr_pagep, 0, PAGE_SIZE);

	atomic_set(&sb_dfr->stat, DFR_SB_STAT_IDLE);
	return err;
}
1890 * @fn sdfat_ioctl_defrag_info
1891 * @brief ioctl to get HW param info
1892 * @return 0 on success, -errno otherwise
1893 * @param sb super block
1894 * @param uarg output buffer
/**
 * @fn		sdfat_ioctl_defrag_info
 * @brief	ioctl to get HW param info
 * @return	0 on success, -errno otherwise
 * @param	sb		super block
 * @param	uarg	output buffer
 */
static int
sdfat_ioctl_defrag_info(
	IN struct super_block *sb,
	OUT unsigned int *uarg)
{
	struct defrag_info_arg info_arg;
	int err = 0;

	memset(&info_arg, 0, sizeof(struct defrag_info_arg));

	err = fsapi_dfr_get_info(sb, &info_arg);
	ERR_HANDLE(err);
	dfr_debug("IOC_DFR_INFO: sec_per_au %d, hidden_sectors %d",
			info_arg.sec_per_au, info_arg.hidden_sectors);

	err = copy_to_user(uarg, &info_arg, sizeof(struct defrag_info_arg));
error:
	return err;
}
1918 #endif /* CONFIG_SDFAT_DFR */
/* Map a cluster through the defrag layer when CONFIG_SDFAT_DFR is set;
 * otherwise a no-op returning 0 so callers need no #ifdef.
 */
static inline int __do_dfr_map_cluster(struct inode *inode, u32 clu_offset, unsigned int *clus_ptr)
{
#ifdef CONFIG_SDFAT_DFR
	return fsapi_dfr_map_clus(inode, clu_offset, clus_ptr);
#else
	return 0;
#endif
}
/* Return 1 if defrag is active on the byte range [start, end) of this
 * inode, 0 otherwise (and always 0 without CONFIG_SDFAT_DFR).
 */
static inline int __check_dfr_on(struct inode *inode, loff_t start, loff_t end, const char *fname)
{
#ifdef CONFIG_SDFAT_DFR
	struct defrag_info *ino_dfr = &(SDFAT_I(inode)->dfr_info);

	/* Final argument 0 = query only; compare __cancel_dfr_work(). */
	if ((atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ) &&
			fsapi_dfr_check_dfr_on(inode, start, end, 0, fname))
		return 1;
#endif
	return 0;
}
/* Cancel any in-flight defrag work overlapping [start, end) of this inode,
 * e.g. before truncate/unlink/mmap.  No-op without CONFIG_SDFAT_DFR.
 * Always returns 0.
 */
static inline int __cancel_dfr_work(struct inode *inode, loff_t start, loff_t end, const char *fname)
{
#ifdef CONFIG_SDFAT_DFR
	struct defrag_info *ino_dfr = &(SDFAT_I(inode)->dfr_info);

	/* Cancel DFR sequence: final argument 1 = cancel mode. */
	if (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ)
		fsapi_dfr_check_dfr_on(inode, start, end, 1, fname);
#endif
	return 0;
}
/* Notify the defrag layer that writeback of this page completed, if the
 * owning inode has a defrag request outstanding.  Always returns 0.
 */
static inline int __dfr_writepage_end_io(struct page *page)
{
#ifdef CONFIG_SDFAT_DFR
	struct defrag_info *ino_dfr = &(SDFAT_I(page->mapping->host)->dfr_info);

	if (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ)
		fsapi_dfr_writepage_endio(page);
#endif
	return 0;
}
/* Initialize the per-inode defrag info (zeroed state, empty list, fresh
 * mutex).  Called when an inode is set up; no-op without CONFIG_SDFAT_DFR.
 */
static inline void __init_dfr_info(struct inode *inode)
{
#ifdef CONFIG_SDFAT_DFR
	memset(&(SDFAT_I(inode)->dfr_info), 0, sizeof(struct defrag_info));
	INIT_LIST_HEAD(&(SDFAT_I(inode)->dfr_info.entry));
	mutex_init(&(SDFAT_I(inode)->dfr_info.lock));
#endif
}
/* Allocate the per-superblock defrag working buffers when the "defrag"
 * mount option is enabled.  Returns 0 on success (or when defrag is off /
 * not compiled in), -ENOMEM on allocation failure.
 *
 * Fix vs. original: on a partial allocation failure the earlier buffers
 * were left allocated with live pointers in sbi; they are now released
 * and NULLed here so a failed mount attempt cannot leak them and
 * __free_dfr_mem_if_required() stays idempotent.
 */
static inline int __alloc_dfr_mem_if_required(struct super_block *sb)
{
#ifdef CONFIG_SDFAT_DFR
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);

	if (!sbi->options.defrag)
		return 0;

	memset(&sbi->dfr_info, 0, sizeof(struct defrag_info));
	INIT_LIST_HEAD(&(sbi->dfr_info.entry));
	mutex_init(&(sbi->dfr_info.lock));

	/* One page of new-cluster slots; index 0 is reserved (dfr_new_idx
	 * starts at 1). */
	sbi->dfr_new_clus = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sbi->dfr_new_clus) {
		dfr_debug("error %d", -ENOMEM);
		goto fail;
	}
	sbi->dfr_new_idx = 1;

	/* One page of per-cluster writeback bitmaps. */
	sbi->dfr_page_wb = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sbi->dfr_page_wb) {
		dfr_debug("error %d", -ENOMEM);
		goto fail;
	}

	/* Page-pointer scratch array, one slot per page of an AU. */
	sbi->dfr_pagep = alloc_pages_exact(sizeof(struct page *) *
			PAGES_PER_AU(sb), GFP_KERNEL | __GFP_ZERO);
	if (!sbi->dfr_pagep) {
		dfr_debug("error %d", -ENOMEM);
		goto fail;
	}

	return 0;

fail:
	/* Release partial allocations; kfree(NULL) is a no-op. */
	kfree(sbi->dfr_page_wb);
	sbi->dfr_page_wb = NULL;
	kfree(sbi->dfr_new_clus);
	sbi->dfr_new_clus = NULL;
	return -ENOMEM;
#else
	return 0;
#endif
}
/* Release the defrag buffers allocated by __alloc_dfr_mem_if_required().
 * Safe to call even if some (or all) buffers were never allocated.
 */
static void __free_dfr_mem_if_required(struct super_block *sb)
{
#ifdef CONFIG_SDFAT_DFR
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);

	if (sbi->dfr_pagep) {
		free_pages_exact(sbi->dfr_pagep, sizeof(struct page *) * PAGES_PER_AU(sb));
		sbi->dfr_pagep = NULL;
	}

	/* kfree(NULL) is a no-op, so no NULL checks are needed below. */
	kfree(sbi->dfr_page_wb);
	sbi->dfr_page_wb = NULL;

	kfree(sbi->dfr_new_clus);
	sbi->dfr_new_clus = NULL;
#endif
}
/* mmap entry point: cancel any defrag work overlapping the mapped range
 * first (mapped pages fail defrag page validation), then defer to the
 * generic implementation.
 */
static int sdfat_file_mmap(struct file *file, struct vm_area_struct *vm_struct)
{
	__cancel_dfr_work(file->f_mapping->host,
			(loff_t)vm_struct->vm_start,
			(loff_t)(vm_struct->vm_end - 1),
			__func__);

	return generic_file_mmap(file, vm_struct);
}
/* Return the volume serial number for SDFAT_IOCTL_GET_VOLUME_ID. */
static int sdfat_ioctl_volume_id(struct inode *dir)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(dir->i_sb);
	FS_INFO_T *fsi = &(sbi->fsi);

	return fsi->vol_id;
}
/* Dispatch the defrag-related ioctls.  Every command verifies, in order:
 * FAT32 volume, "defrag" mount option enabled, and (for INFO/REQ) that the
 * ioctl was issued on the mount root.  Returns -ENOTTY for commands not
 * handled here so the caller can try the next handler.
 */
static int sdfat_dfr_ioctl(struct inode *inode, struct file *filp,
				unsigned int cmd, unsigned long arg)
{
#ifdef CONFIG_SDFAT_DFR
	switch (cmd) {
	case SDFAT_IOCTL_DFR_INFO: {
		struct super_block *sb = inode->i_sb;
		FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
		unsigned int __user *uarg = (unsigned int __user *) arg;

		__lock_super(sb);
		/* Check FS type (FAT32 only) */
		if (fsi->vol_type != FAT32) {
			dfr_err("Defrag not supported, vol_type %d", fsi->vol_type);
			__unlock_super(sb);
			return -EPERM;
		}

		/* Check if SB's defrag option enabled */
		if (!(SDFAT_SB(sb)->options.defrag)) {
			dfr_err("Defrag not supported, sbi->options.defrag %d", SDFAT_SB(sb)->options.defrag);
			__unlock_super(sb);
			return -EPERM;
		}

		/* Only IOCTL on mount-point allowed */
		if (filp->f_path.mnt->mnt_root != filp->f_path.dentry) {
			dfr_err("IOC_DFR_INFO only allowed on ROOT, root %p, dentry %p",
					filp->f_path.mnt->mnt_root, filp->f_path.dentry);
			__unlock_super(sb);
			return -EPERM;
		}
		__unlock_super(sb);

		return sdfat_ioctl_defrag_info(sb, uarg);
	}
	case SDFAT_IOCTL_DFR_TRAV: {
		struct super_block *sb = inode->i_sb;
		FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
		unsigned int __user *uarg = (unsigned int __user *) arg;

		__lock_super(sb);
		/* Check FS type (FAT32 only) */
		if (fsi->vol_type != FAT32) {
			dfr_err("Defrag not supported, vol_type %d", fsi->vol_type);
			__unlock_super(sb);
			return -EPERM;
		}

		/* Check if SB's defrag option enabled */
		if (!(SDFAT_SB(sb)->options.defrag)) {
			dfr_err("Defrag not supported, sbi->options.defrag %d", SDFAT_SB(sb)->options.defrag);
			__unlock_super(sb);
			return -EPERM;
		}
		__unlock_super(sb);

		return sdfat_ioctl_defrag_trav(inode, uarg);
	}
	case SDFAT_IOCTL_DFR_REQ: {
		struct super_block *sb = inode->i_sb;
		FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
		unsigned int __user *uarg = (unsigned int __user *) arg;

		__lock_super(sb);

		/* Check if FS_ERROR occurred */
		if (sb->s_flags & MS_RDONLY) {
			dfr_err("RDONLY partition (err %d)", -EPERM);
			__unlock_super(sb);
			return -EPERM;
		}

		/* Check FS type (FAT32 only) */
		if (fsi->vol_type != FAT32) {
			dfr_err("Defrag not supported, vol_type %d", fsi->vol_type);
			__unlock_super(sb);
			return -EPERM;
		}

		/* Check if SB's defrag option enabled */
		if (!(SDFAT_SB(sb)->options.defrag)) {
			dfr_err("Defrag not supported, sbi->options.defrag %d", SDFAT_SB(sb)->options.defrag);
			__unlock_super(sb);
			return -EPERM;
		}

		/* Only IOCTL on mount-point allowed */
		if (filp->f_path.mnt->mnt_root != filp->f_path.dentry) {
			dfr_err("IOC_DFR_INFO only allowed on ROOT, root %p, dentry %p",
					filp->f_path.mnt->mnt_root, filp->f_path.dentry);
			__unlock_super(sb);
			return -EPERM;
		}
		__unlock_super(sb);

		return sdfat_ioctl_defrag_req(inode, uarg);
	}
#ifdef CONFIG_SDFAT_DFR_DEBUG
	case SDFAT_IOCTL_DFR_SPO_FLAG: {
		struct sdfat_sb_info *sbi = SDFAT_SB(inode->i_sb);
		int ret = 0;

		ret = get_user(sbi->dfr_spo_flag, (int __user *)arg);
		dfr_debug("dfr_spo_flag %d", sbi->dfr_spo_flag);

		return ret;
	}
#endif	/* CONFIG_SDFAT_DFR_DEBUG */
	}
#endif	/* CONFIG_SDFAT_DFR */

	/* Inappropriate ioctl for device */
	return -ENOTTY;
}
2158 #endif /* CONFIG_SDFAT_DFR */
2160 /* Inappropriate ioctl for device */
/* Debug-only ioctls (CONFIG_SDFAT_DBG_IOCTL): get/set debug flags and a
 * deliberate test panic.  Returns -ENOTTY for unknown commands or when
 * the config option is disabled.
 */
static int sdfat_dbg_ioctl(struct inode *inode, struct file *filp,
				unsigned int cmd, unsigned long arg)
{
#ifdef CONFIG_SDFAT_DBG_IOCTL
	struct super_block *sb = inode->i_sb;
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	unsigned int flags;

	switch (cmd) {
	case SDFAT_IOC_GET_DEBUGFLAGS:
		flags = sbi->debug_flags;
		return put_user(flags, (int __user *)arg);
	case SDFAT_IOC_SET_DEBUGFLAGS:
		flags = 0;
		/* Setting debug flags is privileged. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		__lock_super(sb);
		sbi->debug_flags = flags;
		__unlock_super(sb);
		return 0;
	case SDFAT_IOCTL_PANIC:
		panic("ioctl panic for test");

		/* COULD NOT REACH HERE */
		return 0;
	}
#endif /* CONFIG_SDFAT_DBG_IOCTL */
	return -ENOTTY;
}
/* unlocked_ioctl entry point.  Tries, in order: volume-id query, defrag
 * ioctls, then debug ioctls; each stage passes -ENOTTY on to the next.
 */
static long sdfat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	int err;

	if (cmd == SDFAT_IOCTL_GET_VOLUME_ID)
		return sdfat_ioctl_volume_id(inode);

	err = sdfat_dfr_ioctl(inode, filp, cmd, arg);
	if (err != -ENOTTY)
		return err;

	/* -ENOTTY if inappropriate ioctl for device */
	return sdfat_dbg_ioctl(inode, filp, cmd, arg);
}
/* Completion handler for a single-page write bio: propagate the error to
 * the mapping, notify the defrag layer, update trace counters, and finish
 * page writeback.
 */
static void __sdfat_writepage_end_io(struct bio *bio, int err)
{
	struct page *page = bio->bi_io_vec->bv_page;
	struct super_block *sb = page->mapping->host->i_sb;

	ASSERT(bio->bi_vcnt == 1); /* Single page endio */
	ASSERT(bio_data_dir(bio)); /* Write */

	if (err) {
		SetPageError(page);
		mapping_set_error(page->mapping, err);
	}

	__dfr_writepage_end_io(page);

#ifdef CONFIG_SDFAT_TRACE_IO
	{
		//struct sdfat_sb_info *sbi = SDFAT_SB(bio->bi_bdev->bd_super);
		struct sdfat_sb_info *sbi = SDFAT_SB(sb);

		sbi->stat_n_pages_written++;
		if (page->mapping->host == sb->s_bdev->bd_inode)
			sbi->stat_n_bdev_pages_written++;

		/* 4 MB = 1024 pages => 0.4 sec (approx.)
		 * 32 KB = 64 pages => 0.025 sec
		 * Min. average latency b/w msgs. ~= 0.025 sec
		 */
		if ((sbi->stat_n_pages_written & 63) == 0) {
			/* NOTE(review): "witten" below looks like a typo of
			 * "written"; it must match the sdfat_sb_info field
			 * name — confirm which spelling the struct uses. */
			DMSG("STAT:%u, %u, %u, %u (Sector #: %u)\n",
			sbi->stat_n_pages_added, sbi->stat_n_pages_written,
			sbi->stat_n_bdev_pages_witten,
			sbi->stat_n_pages_confused,
			(unsigned int)__sdfat_bio_sector(bio));
		}
	}
#endif
	end_page_writeback(page);
	bio_put(bio);

	// Update trace info.
	atomic_dec(&SDFAT_SB(sb)->stat_n_pages_queued);
}
/* Return 1 when write_inode alone is enough for a synchronous directory
 * update (so fsync need not force a full fsapi_sync_fs), 0 otherwise.
 * With delayed meta dirty enabled this only holds for exFAT volumes.
 */
static int __support_write_inode_sync(struct super_block *sb)
{
#ifdef CONFIG_SDFAT_SUPPORT_DIR_SYNC
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);

	if (sbi->fsi.vol_type != EXFAT)
		return 0;
#endif
	return 1;
#else
	return 0;
#endif
}
/* fsync: run the generic file fsync, then force a filesystem-level sync
 * on volumes where write_inode alone does not persist metadata.
 * Returns the first error encountered (generic fsync result wins).
 */
static int __sdfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	struct super_block *sb = inode->i_sb;
	int res, err = 0;

	res = __sdfat_generic_file_fsync(filp, start, end, datasync);

	if (!__support_write_inode_sync(sb))
		err = fsapi_sync_fs(sb, 1);

	return res ? res : err;
}
/* Directory file operations; the readdir callback name depends on the
 * kernel version (iterate from 3.14, readdir before).
 */
static const struct file_operations sdfat_dir_operations = {
	.llseek     = generic_file_llseek,
	.read       = generic_read_dir,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
	.iterate    = sdfat_iterate,
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
	.readdir    = sdfat_readdir,
#endif
	.fsync      = sdfat_file_fsync,
	.unlocked_ioctl = sdfat_generic_ioctl,
};
/* Create a regular file in @dir and instantiate @dentry with the new
 * inode.  Parent timestamps are updated; revalidation of the dentry is
 * locked for the duration so a concurrent lookup cannot drop it.
 * Returns 0 on success, -errno otherwise.
 */
static int __sdfat_create(struct inode *dir, struct dentry *dentry)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode;
	sdfat_timespec_t ts;
	FILE_ID_T fid;
	loff_t i_pos;
	int err;

	__lock_super(sb);

	TMSG("%s entered\n", __func__);

	ts = CURRENT_TIME_SEC;

	err = fsapi_create(dir, (u8 *) dentry->d_name.name, FM_REGULAR, &fid);
	if (err)
		goto out;

	__lock_d_revalidate(dentry);

	dir->i_version++;
	dir->i_ctime = dir->i_mtime = dir->i_atime = ts;
	if (IS_DIRSYNC(dir))
		(void) sdfat_sync_inode(dir);
	else
		mark_inode_dirty(dir);

	i_pos = sdfat_make_i_pos(&fid);
	inode = sdfat_build_inode(sb, &fid, i_pos);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out;
	}
	inode->i_version++;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
	/* timestamp is already written, so mark_inode_dirty() is unneeded. */

	d_instantiate(dentry, inode);
out:
	__unlock_d_revalidate(dentry);
	__unlock_super(sb);
	TMSG("%s exited with err(%d)\n", __func__, err);
	if (!err)
		sdfat_statistics_set_create(fid.flags);
	return err;
}
/* Look up @qname in directory @dir, filling @fid on success.
 * Returns 0 on success, -ENOENT when the name is empty or not found.
 */
static int sdfat_find(struct inode *dir, struct qstr *qname, FILE_ID_T *fid)
{
	int err;

	if (qname->len == 0)
		return -ENOENT;

	err = fsapi_lookup(dir, (u8 *) qname->name, fid);
	if (err)
		return -ENOENT;

	return 0;
}
2365 static int sdfat_d_anon_disconn(struct dentry
*dentry
)
2367 return IS_ROOT(dentry
) && (dentry
->d_flags
& DCACHE_DISCONNECTED
);
/* Look up @dentry in @dir.  For symlinks the target string is read into
 * SDFAT_I(inode)->target on first lookup.  An existing alias dentry for
 * the inode is reused (rehash or d_move) instead of splicing a new one
 * when it belongs to the same parent; this handles the 8.3-alias vs
 * longname duality of FAT names.
 */
static struct dentry *__sdfat_lookup(struct inode *dir, struct dentry *dentry)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode;
	struct dentry *alias;
	int err;
	FILE_ID_T fid;
	loff_t i_pos;
	u64 ret;
	mode_t i_mode;

	__lock_super(sb);
	TMSG("%s entered\n", __func__);
	err = sdfat_find(dir, &dentry->d_name, &fid);
	if (err) {
		if (err == -ENOENT) {
			/* Negative dentry: fall through to d_splice_alias(NULL). */
			inode = NULL;
			goto out;
		}
		goto error;
	}

	i_pos = sdfat_make_i_pos(&fid);
	inode = sdfat_build_inode(sb, &fid, i_pos);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto error;
	}

	i_mode = inode->i_mode;
	if (S_ISLNK(i_mode) && !SDFAT_I(inode)->target) {
		SDFAT_I(inode)->target = kmalloc((i_size_read(inode)+1), GFP_KERNEL);
		if (!SDFAT_I(inode)->target) {
			err = -ENOMEM;
			goto error;
		}
		fsapi_read_link(dir, &fid, SDFAT_I(inode)->target, i_size_read(inode), &ret);
		*(SDFAT_I(inode)->target + i_size_read(inode)) = '\0';
	}

	alias = d_find_alias(inode);

	/*
	 * Checking "alias->d_parent == dentry->d_parent" to make sure
	 * FS is not corrupted (especially double linked dir).
	 */
	if (alias && alias->d_parent == dentry->d_parent &&
	    !sdfat_d_anon_disconn(alias)) {

		/*
		 * Unhashed alias is able to exist because of revalidate()
		 * called by lookup_fast. You can easily make this status
		 * by calling create and lookup concurrently
		 * In such case, we reuse an alias instead of new dentry
		 */
		if (d_unhashed(alias)) {
			BUG_ON(alias->d_name.hash_len != dentry->d_name.hash_len);
			sdfat_msg(sb, KERN_INFO, "rehashed a dentry(%p) "
				"in read lookup", alias);
			d_drop(dentry);
			d_rehash(alias);
		} else if (!S_ISDIR(i_mode)) {
			/*
			 * This inode has non anonymous-DCACHE_DISCONNECTED
			 * dentry. This means, the user did ->lookup() by an
			 * another name (longname vs 8.3 alias of it) in past.
			 *
			 * Switch to new one for reason of locality if possible.
			 */
			d_move(alias, dentry);
		}
		iput(inode);
		__unlock_super(sb);
		TMSG("%s exited\n", __func__);
		return alias;
	}
	dput(alias);
out:
	/* initialize d_time even though it is positive dentry */
	dentry->d_time = dir->i_version;
	__unlock_super(sb);

	dentry = d_splice_alias(inode, dentry);

	TMSG("%s exited\n", __func__);
	return dentry;
error:
	__unlock_super(sb);
	TMSG("%s exited with err(%d)\n", __func__, err);
	return ERR_PTR(err);
}
/* Unlink @dentry from @dir.  Any in-flight defrag work on the inode is
 * cancelled first; on success the inode is detached from the hash and the
 * dentry's d_time is refreshed against the parent's new version.
 */
static int sdfat_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = dir->i_sb;
	sdfat_timespec_t ts;
	int err;

	__lock_super(sb);

	TMSG("%s entered\n", __func__);

	ts = CURRENT_TIME_SEC;

	SDFAT_I(inode)->fid.size = i_size_read(inode);

	/* Defrag must not race with the file going away. */
	__cancel_dfr_work(inode, 0, SDFAT_I(inode)->fid.size, __func__);

	err = fsapi_unlink(dir, &(SDFAT_I(inode)->fid));
	if (err)
		goto out;

	__lock_d_revalidate(dentry);

	dir->i_version++;
	dir->i_mtime = dir->i_atime = ts;
	if (IS_DIRSYNC(dir))
		(void) sdfat_sync_inode(dir);
	else
		mark_inode_dirty(dir);

	clear_nlink(inode);
	inode->i_mtime = inode->i_atime = ts;
	sdfat_detach(inode);
	dentry->d_time = dir->i_version;
out:
	__unlock_d_revalidate(dentry);
	__unlock_super(sb);
	TMSG("%s exited with err(%d)\n", __func__, err);
	return err;
}
/* Create a symlink @dentry in @dir pointing at @target.  Requires the
 * "symlink" mount option.  The target string is written to the on-disk
 * file and also cached in SDFAT_I(inode)->target (NUL-terminated).
 */
static int sdfat_symlink(struct inode *dir, struct dentry *dentry, const char *target)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode;
	sdfat_timespec_t ts;
	FILE_ID_T fid;
	loff_t i_pos;
	int err;
	u64 len = (u64) strlen(target);
	u64 ret;

	/* symlink option check */
	if (!SDFAT_SB(sb)->options.symlink)
		return -ENOTSUPP;

	__lock_super(sb);

	TMSG("%s entered\n", __func__);

	ts = CURRENT_TIME_SEC;

	err = fsapi_create(dir, (u8 *) dentry->d_name.name, FM_SYMLINK, &fid);
	if (err)
		goto out;

	err = fsapi_write_link(dir, &fid, (char *) target, len, &ret);

	if (err) {
		/* Undo the just-created entry if the target write failed. */
		fsapi_remove(dir, &fid);
		goto out;
	}

	__lock_d_revalidate(dentry);

	dir->i_version++;
	dir->i_ctime = dir->i_mtime = dir->i_atime = ts;
	if (IS_DIRSYNC(dir))
		(void) sdfat_sync_inode(dir);
	else
		mark_inode_dirty(dir);

	i_pos = sdfat_make_i_pos(&fid);
	inode = sdfat_build_inode(sb, &fid, i_pos);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out;
	}
	inode->i_version++;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
	/* timestamp is already written, so mark_inode_dirty() is unneeded. */

	SDFAT_I(inode)->target = kmalloc((len+1), GFP_KERNEL);
	if (!SDFAT_I(inode)->target) {
		err = -ENOMEM;
		goto out;
	}
	memcpy(SDFAT_I(inode)->target, target, len+1);

	d_instantiate(dentry, inode);
out:
	__unlock_d_revalidate(dentry);
	__unlock_super(sb);
	TMSG("%s exited with err(%d)\n", __func__, err);
	return err;
}
/*
 * __sdfat_mkdir(): common mkdir body.  Creates the directory on disk via
 * fsapi_mkdir(), stamps parent times, builds the child inode from the
 * returned fid and instantiates the dentry; finally records allocation
 * statistics from fid.flags.  (NOTE(review): extraction-garbled text —
 * interior lines missing; code bytes kept unchanged.)
 */
2571 static int __sdfat_mkdir(struct inode
*dir
, struct dentry
*dentry
)
2573 struct super_block
*sb
= dir
->i_sb
;
2574 struct inode
*inode
;
2582 TMSG("%s entered\n", __func__
);
2584 ts
= CURRENT_TIME_SEC
;
2586 err
= fsapi_mkdir(dir
, (u8
*) dentry
->d_name
.name
, &fid
);
2590 __lock_d_revalidate(dentry
);
2593 dir
->i_ctime
= dir
->i_mtime
= dir
->i_atime
= ts
;
2594 if (IS_DIRSYNC(dir
))
2595 (void) sdfat_sync_inode(dir
);
2597 mark_inode_dirty(dir
);
2600 i_pos
= sdfat_make_i_pos(&fid
);
2601 inode
= sdfat_build_inode(sb
, &fid
, i_pos
);
2602 if (IS_ERR(inode
)) {
2603 err
= PTR_ERR(inode
);
2607 inode
->i_mtime
= inode
->i_atime
= inode
->i_ctime
= ts
;
2608 /* timestamp is already written, so mark_inode_dirty() is unneeded. */
2610 d_instantiate(dentry
, inode
);
2613 __unlock_d_revalidate(dentry
);
2615 TMSG("%s exited with err(%d)\n", __func__
, err
);
2617 sdfat_statistics_set_mkdir(fid
.flags
);
/*
 * sdfat_rmdir(): VFS ->rmdir.  Mirrors sdfat_unlink() but calls
 * fsapi_rmdir(); updates parent times, detaches the victim inode from the
 * hash and invalidates the dentry via d_time.  (NOTE(review): extraction-
 * garbled text — interior lines missing; code bytes kept unchanged.)
 */
2622 static int sdfat_rmdir(struct inode
*dir
, struct dentry
*dentry
)
2624 struct inode
*inode
= dentry
->d_inode
;
2625 struct super_block
*sb
= dir
->i_sb
;
2631 TMSG("%s entered\n", __func__
);
2633 ts
= CURRENT_TIME_SEC
;
2635 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
2637 err
= fsapi_rmdir(dir
, &(SDFAT_I(inode
)->fid
));
2641 __lock_d_revalidate(dentry
);
2644 dir
->i_mtime
= dir
->i_atime
= ts
;
2645 if (IS_DIRSYNC(dir
))
2646 (void) sdfat_sync_inode(dir
);
2648 mark_inode_dirty(dir
);
2652 inode
->i_mtime
= inode
->i_atime
= ts
;
2653 sdfat_detach(inode
);
2654 dentry
->d_time
= dir
->i_version
;
2656 __unlock_d_revalidate(dentry
);
2658 TMSG("%s exited with err(%d)\n", __func__
, err
);
/*
 * __sdfat_rename(): common rename body.  Cancels defrag on the source,
 * performs fsapi_rename(), re-attaches the moved inode at its new i_pos,
 * adjusts nlink counts for cross-directory dir moves and for a replaced
 * target, and bumps both directories' i_version for NFS/dcache coherence.
 * (NOTE(review): extraction-garbled text — interior lines missing; code
 * bytes kept unchanged.)
 */
2662 static int __sdfat_rename(struct inode
*old_dir
, struct dentry
*old_dentry
,
2663 struct inode
*new_dir
, struct dentry
*new_dentry
)
2665 struct inode
*old_inode
, *new_inode
;
2666 struct super_block
*sb
= old_dir
->i_sb
;
2673 TMSG("%s entered\n", __func__
);
2675 old_inode
= old_dentry
->d_inode
;
2676 new_inode
= new_dentry
->d_inode
;
2678 ts
= CURRENT_TIME_SEC
;
2680 SDFAT_I(old_inode
)->fid
.size
= i_size_read(old_inode
);
2682 __cancel_dfr_work(old_inode
, 0, 1, __func__
);
2684 err
= fsapi_rename(old_dir
, &(SDFAT_I(old_inode
)->fid
), new_dir
, new_dentry
);
2688 __lock_d_revalidate(old_dentry
);
2689 __lock_d_revalidate(new_dentry
);
2691 new_dir
->i_version
++;
2692 new_dir
->i_ctime
= new_dir
->i_mtime
= new_dir
->i_atime
= ts
;
2693 if (IS_DIRSYNC(new_dir
))
2694 (void) sdfat_sync_inode(new_dir
);
2696 mark_inode_dirty(new_dir
);
/* inode moved on disk: re-key it in the i_pos hash */
2698 i_pos
= sdfat_make_i_pos(&(SDFAT_I(old_inode
)->fid
));
2699 sdfat_detach(old_inode
);
2700 sdfat_attach(old_inode
, i_pos
);
2701 if (IS_DIRSYNC(new_dir
))
2702 (void) sdfat_sync_inode(old_inode
);
2704 mark_inode_dirty(old_inode
);
/* a directory moved out of old_dir drops old_dir's ".." link */
2706 if ((S_ISDIR(old_inode
->i_mode
)) && (old_dir
!= new_dir
)) {
2707 drop_nlink(old_dir
);
2712 old_dir
->i_version
++;
2713 old_dir
->i_ctime
= old_dir
->i_mtime
= ts
;
2714 if (IS_DIRSYNC(old_dir
))
2715 (void) sdfat_sync_inode(old_dir
);
2717 mark_inode_dirty(old_dir
);
/* target existed and was replaced by the rename */
2720 sdfat_detach(new_inode
);
2722 /* skip drop_nlink if new_inode already has been dropped */
2723 if (new_inode
->i_nlink
) {
2724 drop_nlink(new_inode
);
2725 if (S_ISDIR(new_inode
->i_mode
))
2726 drop_nlink(new_inode
);
2728 EMSG("%s : abnormal access to an inode dropped\n",
2730 WARN_ON(new_inode
->i_nlink
== 0);
2732 new_inode
->i_ctime
= ts
;
2734 (void) sdfat_sync_inode(new_inode
);
2739 __unlock_d_revalidate(old_dentry
);
2740 __unlock_d_revalidate(new_dentry
);
2742 TMSG("%s exited with err(%d)\n", __func__
, err
);
/*
 * sdfat_cont_expand(): grow a file to `size` (used by setattr when
 * ia_size > i_size).  Delegates to generic_cont_expand_simple(); for
 * IS_SYNC inodes it then forces the newly zeroed range and the inode
 * itself to disk, returning the first error encountered.  (NOTE(review):
 * extraction-garbled text — interior lines missing; code bytes kept
 * unchanged.)
 */
2746 static int sdfat_cont_expand(struct inode
*inode
, loff_t size
)
2748 struct address_space
*mapping
= inode
->i_mapping
;
2749 loff_t start
= i_size_read(inode
), count
= size
- i_size_read(inode
);
2752 err
= generic_cont_expand_simple(inode
, size
);
2756 inode
->i_ctime
= inode
->i_mtime
= CURRENT_TIME_SEC
;
2757 mark_inode_dirty(inode
);
/* non-sync inodes are done; sync inodes flush data + inode below */
2759 if (!IS_SYNC(inode
))
2762 err
= filemap_fdatawrite_range(mapping
, start
, start
+ count
- 1);
2763 err2
= sync_mapping_buffers(mapping
);
/* keep the first error, fall back to the second */
2764 err
= (err
)?(err
):(err2
);
2765 err2
= write_inode_now(inode
, 1);
2766 err
= (err
)?(err
):(err2
);
2770 return filemap_fdatawait_range(mapping
, start
, start
+ count
- 1);
/*
 * sdfat_allow_set_time(): decide whether the caller may set timestamps
 * without being the file owner, based on the "allow_utime" mount option
 * (checked against group membership / MAY_WRITE).  Falls through to the
 * default permission check otherwise.  (NOTE(review): extraction-garbled
 * text — interior lines missing; code bytes kept unchanged.)
 */
2773 static int sdfat_allow_set_time(struct sdfat_sb_info
*sbi
, struct inode
*inode
)
2775 mode_t allow_utime
= sbi
->options
.allow_utime
;
2777 if (!uid_eq(current_fsuid(), inode
->i_uid
)) {
2778 if (in_group_p(inode
->i_gid
))
2780 if (allow_utime
& MAY_WRITE
)
2784 /* use a default check */
/*
 * sdfat_sanitize_mode(): clamp a chmod-requested mode to what FAT can
 * actually represent.  FAT only stores a read-only bit, so the r/x bits
 * must match the current mode (modulo the fmask/dmask mount options) and
 * the w bits must be all-or-nothing.  On success *mode_ptr is rewritten to
 * the representable permission set.  (NOTE(review): extraction-garbled
 * text — interior lines missing; code bytes kept unchanged.)
 */
2788 static int sdfat_sanitize_mode(const struct sdfat_sb_info
*sbi
,
2789 struct inode
*inode
, umode_t
*mode_ptr
)
2791 mode_t i_mode
, mask
, perm
;
2793 i_mode
= inode
->i_mode
;
/* files/symlinks use fmask, directories use dmask */
2795 if (S_ISREG(i_mode
) || S_ISLNK(i_mode
))
2796 mask
= sbi
->options
.fs_fmask
;
2798 mask
= sbi
->options
.fs_dmask
;
2800 perm
= *mode_ptr
& ~(S_IFMT
| mask
);
2802 /* Of the r and x bits, all (subject to umask) must be present.*/
2803 if ((perm
& (S_IRUGO
| S_IXUGO
)) != (i_mode
& (S_IRUGO
| S_IXUGO
)))
2806 if (sdfat_mode_can_hold_ro(inode
)) {
2807 /* Of the w bits, either all (subject to umask) or none must be present. */
2808 if ((perm
& S_IWUGO
) && ((perm
& S_IWUGO
) != (S_IWUGO
& ~mask
)))
2811 /* If sdfat_mode_can_hold_ro(inode) is false, can't change w bits. */
2812 if ((perm
& S_IWUGO
) != (S_IWUGO
& ~mask
))
2816 *mode_ptr
&= S_IFMT
| perm
;
/*
 * sdfat_setattr(): VFS ->setattr.  Handles size expansion up front via
 * sdfat_cont_expand(), relaxes the time-set permission checks per the
 * allow_utime option, rejects uid/gid/mode values FAT cannot store,
 * sanitizes ATTR_MODE through sdfat_sanitize_mode(), and performs the
 * shrink path via __sdfat_do_truncate().  (NOTE(review): extraction-
 * garbled text — interior lines missing; code bytes kept unchanged.)
 */
2821 static int sdfat_setattr(struct dentry
*dentry
, struct iattr
*attr
)
2824 struct sdfat_sb_info
*sbi
= SDFAT_SB(dentry
->d_sb
);
2825 struct inode
*inode
= dentry
->d_inode
;
2826 unsigned int ia_valid
;
2830 TMSG("%s entered\n", __func__
);
/* growing the file is handled separately, then ATTR_SIZE is cleared */
2832 if ((attr
->ia_valid
& ATTR_SIZE
)
2833 && (attr
->ia_size
> i_size_read(inode
))) {
2834 error
= sdfat_cont_expand(inode
, attr
->ia_size
);
2835 if (error
|| attr
->ia_valid
== ATTR_SIZE
)
2837 attr
->ia_valid
&= ~ATTR_SIZE
;
2840 /* Check for setting the inode time. */
2841 ia_valid
= attr
->ia_valid
;
2842 if ((ia_valid
& (ATTR_MTIME_SET
| ATTR_ATIME_SET
| ATTR_TIMES_SET
))
2843 && sdfat_allow_set_time(sbi
, inode
)) {
2844 attr
->ia_valid
&= ~(ATTR_MTIME_SET
| ATTR_ATIME_SET
| ATTR_TIMES_SET
);
2847 error
= setattr_prepare(dentry
, attr
);
/* restore the caller's flags after the relaxed permission check */
2848 attr
->ia_valid
= ia_valid
;
/* FAT has no per-file ownership/mode storage beyond the mount options */
2852 if (((attr
->ia_valid
& ATTR_UID
) &&
2853 (!uid_eq(attr
->ia_uid
, sbi
->options
.fs_uid
))) ||
2854 ((attr
->ia_valid
& ATTR_GID
) &&
2855 (!gid_eq(attr
->ia_gid
, sbi
->options
.fs_gid
))) ||
2856 ((attr
->ia_valid
& ATTR_MODE
) &&
2857 (attr
->ia_mode
& ~(S_IFREG
| S_IFLNK
| S_IFDIR
| S_IRWXUGO
)))) {
2862 * We don't return -EPERM here. Yes, strange, but this is too
2865 if (attr
->ia_valid
& ATTR_MODE
) {
2866 if (sdfat_sanitize_mode(sbi
, inode
, &attr
->ia_mode
) < 0)
2867 attr
->ia_valid
&= ~ATTR_MODE
;
2870 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
2872 /* patch 1.2.0 : fixed the problem of size mismatch. */
2873 if (attr
->ia_valid
& ATTR_SIZE
) {
2874 old_size
= i_size_read(inode
);
2876 /* TO CHECK evicting directory works correctly */
2877 MMSG("%s: inode(%p) truncate size (%llu->%llu)\n", __func__
,
2878 inode
, (u64
)old_size
, (u64
)attr
->ia_size
);
2879 __sdfat_do_truncate(inode
, old_size
, attr
->ia_size
);
2881 setattr_copy(inode
, attr
);
2882 mark_inode_dirty(inode
);
2885 TMSG("%s exited with err(%d)\n", __func__
, error
);
/*
 * sdfat_getattr(): VFS ->getattr (pre-4.11 signature).  Fills the kstat
 * from the inode and reports the cluster size as the preferred I/O block
 * size.  (NOTE(review): extraction-garbled text — interior lines missing;
 * code bytes kept unchanged.)
 */
2889 static int sdfat_getattr(struct vfsmount
*mnt
, struct dentry
*dentry
, struct kstat
*stat
)
2891 struct inode
*inode
= dentry
->d_inode
;
2893 TMSG("%s entered\n", __func__
);
2895 generic_fillattr(inode
, stat
);
2896 stat
->blksize
= SDFAT_SB(inode
->i_sb
)->fsi
.cluster_size
;
2898 TMSG("%s exited\n", __func__
);
/*
 * Directory inode operations table; xattr handlers are compiled in only
 * with CONFIG_SDFAT_VIRTUAL_XATTR (per-op pointers only for kernels
 * before 4.9).  (NOTE(review): extraction-garbled text; bytes unchanged.)
 */
2902 static const struct inode_operations sdfat_dir_inode_operations
= {
2903 .create
= sdfat_create
,
2904 .lookup
= sdfat_lookup
,
2905 .unlink
= sdfat_unlink
,
2906 .symlink
= sdfat_symlink
,
2907 .mkdir
= sdfat_mkdir
,
2908 .rmdir
= sdfat_rmdir
,
2909 .rename
= sdfat_rename
,
2910 .setattr
= sdfat_setattr
,
2911 .getattr
= sdfat_getattr
,
2912 #ifdef CONFIG_SDFAT_VIRTUAL_XATTR
2913 .listxattr
= sdfat_listxattr
,
2914 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
2915 .setxattr
= sdfat_setxattr
,
2916 .getxattr
= sdfat_getxattr
,
2917 .removexattr
= sdfat_removexattr
,
2922 /*======================================================================*/
2923 /* File Operations */
2924 /*======================================================================*/
/*
 * Symlink inode operations: get_link on >= 4.5 kernels, follow_link on
 * older ones; optional virtual-xattr handlers as for directories.
 * (NOTE(review): extraction-garbled text; bytes unchanged.)
 */
2925 static const struct inode_operations sdfat_symlink_inode_operations
= {
2926 .readlink
= generic_readlink
,
2927 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
2928 .get_link
= sdfat_follow_link
,
2929 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) */
2930 .follow_link
= sdfat_follow_link
,
2932 #ifdef CONFIG_SDFAT_VIRTUAL_XATTR
2933 .listxattr
= sdfat_listxattr
,
2934 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
2935 .setxattr
= sdfat_setxattr
,
2936 .getxattr
= sdfat_getxattr
,
2937 .removexattr
= sdfat_removexattr
,
/*
 * sdfat_file_release(): VFS ->release.  Re-syncs fid.size with i_size (the
 * debug bug_on asserts the two never diverge) and triggers a lazy fs sync
 * on close.  (NOTE(review): extraction-garbled text — interior lines
 * missing; code bytes kept unchanged.)
 */
2942 static int sdfat_file_release(struct inode
*inode
, struct file
*filp
)
2944 struct super_block
*sb
= inode
->i_sb
;
2946 /* Moved below code from sdfat_write_inode
2947 * TO FIX size-mismatch problem.
2949 /* FIXME : Added bug_on to confirm that there is no size mismatch */
2950 sdfat_debug_bug_on(SDFAT_I(inode
)->fid
.size
!= i_size_read(inode
));
2951 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
2952 fsapi_sync_fs(sb
, 0);
/*
 * Regular-file operations; read/write entry points are selected per
 * kernel version (iter-based >= 4.1, new_sync_* on 3.16+, aio on older).
 * (NOTE(review): extraction-garbled text; bytes unchanged.)
 */
2956 static const struct file_operations sdfat_file_operations
= {
2957 .llseek
= generic_file_llseek
,
2958 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
2959 .read_iter
= generic_file_read_iter
,
2960 .write_iter
= generic_file_write_iter
,
2961 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
2962 .read
= new_sync_read
,
2963 .write
= new_sync_write
,
2964 .read_iter
= generic_file_read_iter
,
2965 .write_iter
= generic_file_write_iter
,
2966 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) */
2967 .read
= do_sync_read
,
2968 .write
= do_sync_write
,
2969 .aio_read
= generic_file_aio_read
,
2970 .aio_write
= generic_file_aio_write
,
2972 .mmap
= sdfat_file_mmap
,
2973 .release
= sdfat_file_release
,
2974 .unlocked_ioctl
= sdfat_generic_ioctl
,
2975 .fsync
= sdfat_file_fsync
,
2976 .splice_read
= generic_file_splice_read
,
2979 static const struct address_space_operations sdfat_da_aops
;
2980 static const struct address_space_operations sdfat_aops
;
/*
 * sdfat_truncate(): shrink-path truncate.  Rejects a zeroed start cluster
 * as corruption, cancels defrag over the shrunk range, calls
 * fsapi_truncate(), stamps times, recomputes i_blocks from the cluster-
 * rounded size, and pulls i_size_ondisk / i_size_aligned down to the
 * block-aligned new size.  (NOTE(review): extraction-garbled text —
 * interior lines missing; code bytes kept unchanged.)
 */
2982 static void sdfat_truncate(struct inode
*inode
, loff_t old_size
)
2984 struct super_block
*sb
= inode
->i_sb
;
2985 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
2986 FS_INFO_T
*fsi
= &(sbi
->fsi
);
2987 unsigned int blocksize
= 1 << inode
->i_blkbits
;
2988 loff_t aligned_size
;
2993 if (SDFAT_I(inode
)->fid
.start_clu
== 0) {
2994 /* Stange statement:
2995 * Empty start_clu != ~0 (not allocated)
2997 sdfat_fs_error(sb
, "tried to truncate zeroed cluster.");
3001 sdfat_debug_check_clusters(inode
);
3003 __cancel_dfr_work(inode
, (loff_t
)i_size_read(inode
), (loff_t
)old_size
, __func__
);
3005 err
= fsapi_truncate(inode
, old_size
, i_size_read(inode
));
3009 inode
->i_ctime
= inode
->i_mtime
= CURRENT_TIME_SEC
;
3010 if (IS_DIRSYNC(inode
))
3011 (void) sdfat_sync_inode(inode
);
3013 mark_inode_dirty(inode
);
3016 // inode->i_blocks = ((SDFAT_I(inode)->i_size_ondisk + (fsi->cluster_size - 1))
/* round i_size up to a whole cluster, expressed in 512-byte-ish blocks */
3017 inode
->i_blocks
= ((i_size_read(inode
) + (fsi
->cluster_size
- 1)) &
3018 ~((loff_t
)fsi
->cluster_size
- 1)) >> inode
->i_blkbits
;
3021 * This protects against truncating a file bigger than it was then
3022 * trying to write into the hole.
3024 * comment by sh.hong:
3025 * This seems to mean 'intra page/block' truncate and writing.
3026 * I couldn't find a reason to change the values prior to fsapi_truncate
3027 * Therefore, I switched the order of operations
3028 * so that it's possible to utilize i_size_ondisk in fsapi_truncate
3031 aligned_size
= i_size_read(inode
);
3032 if (aligned_size
& (blocksize
- 1)) {
3033 aligned_size
|= (blocksize
- 1);
3037 if (SDFAT_I(inode
)->i_size_ondisk
> i_size_read(inode
))
3038 SDFAT_I(inode
)->i_size_ondisk
= aligned_size
;
3040 sdfat_debug_check_clusters(inode
);
3042 if (SDFAT_I(inode
)->i_size_aligned
> i_size_read(inode
))
3043 SDFAT_I(inode
)->i_size_aligned
= aligned_size
;
3045 /* After truncation :
3046 * 1) Delayed allocation is OFF
3047 * i_size = i_size_ondisk <= i_size_aligned
3048 * (useless size var.)
3050 * 2) Delayed allocation is ON
3051 * i_size = i_size_ondisk = i_size_aligned
3052 * (will be block-aligned after write)
3054 * i_size_ondisk < i_size <= i_size_aligned (block_aligned)
3055 * (will be block-aligned after write)
/*
 * Regular-file inode operations (setattr/getattr plus optional virtual
 * xattr handlers).  (NOTE(review): extraction-garbled text; bytes
 * unchanged.)
 */
3061 static const struct inode_operations sdfat_file_inode_operations
= {
3062 .setattr
= sdfat_setattr
,
3063 .getattr
= sdfat_getattr
,
3064 #ifdef CONFIG_SDFAT_VIRTUAL_XATTR
3065 .listxattr
= sdfat_listxattr
,
3066 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
3067 .setxattr
= sdfat_setxattr
,
3068 .getxattr
= sdfat_getxattr
,
3069 .removexattr
= sdfat_removexattr
,
3074 /*======================================================================*/
3075 /* Address Space Operations */
3076 /*======================================================================*/
3077 /* 2-level option flag */
3078 #define BMAP_NOT_CREATE 0
3079 #define BMAP_ADD_BLOCK 1
3080 #define BMAP_ADD_CLUSTER 2
3081 #define BLOCK_ADDED(bmap_ops) (bmap_ops)
/*
 * sdfat_bmap(): core logical-sector -> physical-sector mapping used by all
 * get_block variants.  Handles the fixed FAT12/16 root directory area,
 * maps the containing cluster (allocating one only when *create requests
 * BMAP_ADD_CLUSTER, or via the defrag remap path), and returns the run
 * length in *mapped_blocks.  *create is downgraded to BMAP_NOT_CREATE for
 * sectors below the current last block.  (NOTE(review): extraction-garbled
 * text — interior lines missing; code bytes kept unchanged.)
 */
3082 static int sdfat_bmap(struct inode
*inode
, sector_t sector
, sector_t
*phys
,
3083 unsigned long *mapped_blocks
, int *create
)
3085 struct super_block
*sb
= inode
->i_sb
;
3086 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
3087 FS_INFO_T
*fsi
= &(sbi
->fsi
);
3088 const unsigned long blocksize
= sb
->s_blocksize
;
3089 const unsigned char blocksize_bits
= sb
->s_blocksize_bits
;
3090 sector_t last_block
;
3091 unsigned int cluster
, clu_offset
, sec_offset
;
3097 /* core code should handle EIO */
3099 if (fsi
->prev_eio
&& BLOCK_ADDED(*create
))
/* FAT12/16 root directory occupies a fixed region, map it directly */
3103 if (((fsi
->vol_type
== FAT12
) || (fsi
->vol_type
== FAT16
)) &&
3104 (inode
->i_ino
== SDFAT_ROOT_INO
)) {
3105 if (sector
< (fsi
->dentries_in_root
>>
3106 (sb
->s_blocksize_bits
-DENTRY_SIZE_BITS
))) {
3107 *phys
= sector
+ fsi
->root_start_sector
;
3113 last_block
= (i_size_read(inode
) + (blocksize
- 1)) >> blocksize_bits
;
3114 if ((sector
>= last_block
) && (*create
== BMAP_NOT_CREATE
))
3117 /* Is this block already allocated? */
3118 clu_offset
= sector
>> fsi
->sect_per_clus_bits
; /* cluster offset */
3120 SDFAT_I(inode
)->fid
.size
= i_size_read(inode
);
/* defrag window: remap through the defrag table instead of the FAT */
3123 if (unlikely(__check_dfr_on(inode
,
3124 (loff_t
)((loff_t
)clu_offset
<< fsi
->cluster_size_bits
),
3125 (loff_t
)((loff_t
)(clu_offset
+ 1) << fsi
->cluster_size_bits
),
3127 err
= __do_dfr_map_cluster(inode
, clu_offset
, &cluster
);
3129 if (*create
& BMAP_ADD_CLUSTER
)
3130 err
= fsapi_map_clus(inode
, clu_offset
, &cluster
, 1);
3132 err
= fsapi_map_clus(inode
, clu_offset
, &cluster
, ALLOC_NOWHERE
);
3142 sdfat_statistics_set_rw(SDFAT_I(inode
)->fid
.flags
,
3143 clu_offset
, *create
& BMAP_ADD_CLUSTER
);
3145 if (!IS_CLUS_EOF(cluster
)) {
3146 /* sector offset in cluster */
3147 sec_offset
= sector
& (fsi
->sect_per_clus
- 1);
3149 *phys
= CLUS_TO_SECT(fsi
, cluster
) + sec_offset
;
3150 *mapped_blocks
= fsi
->sect_per_clus
- sec_offset
;
3154 /* Debug purpose (new clu needed) */
3155 ASSERT((*create
& BMAP_ADD_CLUSTER
) == 0);
3156 ASSERT(sector
>= last_block
);
3160 if (sector
< last_block
)
3161 *create
= BMAP_NOT_CREATE
;
3163 else if (sector
>= last_block
)
3166 if (iblock
<= last mapped
-block
)
3168 *create
= BMAP_NOT_CREATE
3169 else if (iblock
<= last cluster
)
/*
 * sdfat_da_prep_block(): get_block callback for the delayed-allocation
 * write_begin path (FAT32 only).  Existing blocks are mapped normally;
 * new blocks are marked delayed (fake phys ~0xffff) after reserving a
 * cluster when the block starts a new cluster, and i_size_aligned /
 * i_size_ondisk are advanced consistently.  (NOTE(review): extraction-
 * garbled text — interior lines missing; code bytes kept unchanged;
 * Korean comments translated.)
 */
3176 static int sdfat_da_prep_block(struct inode
*inode
, sector_t iblock
,
3177 struct buffer_head
*bh_result
, int create
)
3179 struct super_block
*sb
= inode
->i_sb
;
3180 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
3181 FS_INFO_T
*fsi
= &(sbi
->fsi
);
3182 unsigned long max_blocks
= bh_result
->b_size
>> inode
->i_blkbits
;
3183 unsigned long mapped_blocks
;
3187 int bmap_create
= create
? BMAP_ADD_BLOCK
: BMAP_NOT_CREATE
;
/* delayed allocation is only enabled on FAT32 volumes */
3193 ASSERT(fsi
->vol_type
== FAT32
);
3195 err
= sdfat_bmap(inode
, iblock
, &phys
, &mapped_blocks
, &bmap_create
);
3198 sdfat_fs_error_ratelimit(sb
, "%s: failed to bmap "
3199 "(iblock:%u, err:%d)", __func__
,
3204 sec_offset
= iblock
& (fsi
->sect_per_clus
- 1);
3207 /* the block in in the mapped cluster boundary */
3208 max_blocks
= min(mapped_blocks
, max_blocks
);
3209 map_bh(bh_result
, sb
, phys
);
3211 BUG_ON(BLOCK_ADDED(bmap_create
) && (sec_offset
== 0));
3213 } else if (create
== 1) {
3214 /* Not exist: new cluster needed */
3215 BUG_ON(!BLOCK_ADDED(bmap_create
));
3217 // Reserved Cluster (only if iblock is the first sector in a clu)
3218 if (sec_offset
== 0) {
3219 err
= fsapi_reserve_clus(inode
);
3222 sdfat_fs_error_ratelimit(sb
,
3223 "%s: failed to bmap "
3224 "(iblock:%u, err:%d)", __func__
,
/* delayed block: fake physical address, resolved at writeback */
3232 map_bh(bh_result
, sb
, ~((sector_t
) 0xffff));
3233 set_buffer_new(bh_result
);
3234 set_buffer_delay(bh_result
);
3237 /* get_block on non-existing addr. with create==0 */
3240 * If below i_size_aligned, accept the delayed mapping for now
3242 * - since 0-fill is always done, this is no problem on FAT.
3243 * If the middle region became full, it cannot be
3244 * invalidated without first being written down to disk
3250 /* Newly added blocks */
3251 if (BLOCK_ADDED(bmap_create
)) {
3252 set_buffer_new(bh_result
);
3254 SDFAT_I(inode
)->i_size_aligned
+= max_blocks
<< sb
->s_blocksize_bits
;
3256 /* i_size_ondisk changes if a block added in the existing cluster */
3257 #define num_clusters(value) ((value) ? (s32)((value - 1) >> fsi->cluster_size_bits) + 1 : 0)
3259 /* FOR GRACEFUL ERROR HANDLING */
3260 if (num_clusters(SDFAT_I(inode
)->i_size_aligned
) !=
3261 num_clusters(SDFAT_I(inode
)->i_size_ondisk
)) {
3262 EMSG("%s: inode(%p) invalid size (create(%d) "
3263 "bmap_create(%d) phys(%lld) aligned(%lld) "
3264 "on_disk(%lld) iblock(%u) sec_off(%d))\n",
3265 __func__
, inode
, create
, bmap_create
, (s64
)phys
,
3266 (s64
)SDFAT_I(inode
)->i_size_aligned
,
3267 (s64
)SDFAT_I(inode
)->i_size_ondisk
,
3270 sdfat_debug_bug_on(1);
3272 SDFAT_I(inode
)->i_size_ondisk
= SDFAT_I(inode
)->i_size_aligned
;
3275 pos
= (iblock
+ 1) << sb
->s_blocksize_bits
;
3276 /* Debug purpose - defensive coding */
3277 ASSERT(SDFAT_I(inode
)->i_size_aligned
== pos
);
3278 if (SDFAT_I(inode
)->i_size_aligned
< pos
)
3279 SDFAT_I(inode
)->i_size_aligned
= pos
;
3282 #ifdef CONFIG_SDFAT_TRACE_IO
3283 /* New page added (ASSERTION: 8 blocks per page) */
3284 if ((sec_offset
& 7) == 0)
3285 sbi
->stat_n_pages_added
++;
3289 /* FOR GRACEFUL ERROR HANDLING */
3290 if (i_size_read(inode
) > SDFAT_I(inode
)->i_size_aligned
) {
3291 sdfat_fs_error_ratelimit(sb
, "%s: invalid size (inode(%p), "
3292 "size(%llu) > aligned(%llu)\n", __func__
, inode
,
3293 i_size_read(inode
), SDFAT_I(inode
)->i_size_aligned
);
3294 sdfat_debug_bug_on(1);
3297 bh_result
->b_size
= max_blocks
<< sb
->s_blocksize_bits
;
/*
 * sdfat_get_block(): standard get_block (non-DA path and writeback of
 * delayed buffers).  Maps via sdfat_bmap() with BMAP_ADD_CLUSTER when
 * create is set, advances i_size_ondisk / i_size_aligned for newly added
 * or previously-delayed blocks, clears the delay flag, and maps the bh.
 * (NOTE(review): extraction-garbled text — interior lines missing; code
 * bytes kept unchanged; Korean comments translated.)
 */
3304 static int sdfat_get_block(struct inode
*inode
, sector_t iblock
,
3305 struct buffer_head
*bh_result
, int create
)
3307 struct super_block
*sb
= inode
->i_sb
;
3308 unsigned long max_blocks
= bh_result
->b_size
>> inode
->i_blkbits
;
3310 unsigned long mapped_blocks
;
3313 int bmap_create
= create
? BMAP_ADD_CLUSTER
: BMAP_NOT_CREATE
;
3316 err
= sdfat_bmap(inode
, iblock
, &phys
, &mapped_blocks
, &bmap_create
);
3319 sdfat_fs_error_ratelimit(sb
, "%s: failed to bmap "
3320 "(inode:%p iblock:%u, err:%d)",
3321 __func__
, inode
, (u32
)iblock
, err
);
3326 max_blocks
= min(mapped_blocks
, max_blocks
);
3328 /* Treat newly added block / cluster */
3329 if (BLOCK_ADDED(bmap_create
) || buffer_delay(bh_result
)) {
3331 /* Update i_size_ondisk */
3332 pos
= (iblock
+ 1) << sb
->s_blocksize_bits
;
3333 if (SDFAT_I(inode
)->i_size_ondisk
< pos
) {
/* a jump over a hole should never happen without DA */
3335 if ((pos
- SDFAT_I(inode
)->i_size_ondisk
) > bh_result
->b_size
) {
3336 /* This never happens without DA */
3337 MMSG("Jumping get_block\n");
3340 SDFAT_I(inode
)->i_size_ondisk
= pos
;
3341 sdfat_debug_check_clusters(inode
);
3344 if (BLOCK_ADDED(bmap_create
)) {
3346 * create == 1 only if iblock > i_size
3351 * If this races with a truncate,
3352 * i_size < (i_block position) while buffer_delay() is set;
3355 * it merely rewrites an already-allocated area, so not a big
3356 * problem, but in that case i_size_aligned was extended beforehand
3360 /* FOR GRACEFUL ERROR HANDLING */
3361 if (buffer_delay(bh_result
) &&
3362 (pos
> SDFAT_I(inode
)->i_size_aligned
)) {
3363 sdfat_fs_error(sb
, "requested for bmap "
3364 "out of range(pos:(%llu)>i_size_aligned(%llu)\n",
3365 pos
, SDFAT_I(inode
)->i_size_aligned
);
3366 sdfat_debug_bug_on(1);
3370 set_buffer_new(bh_result
);
3373 * adjust i_size_aligned if i_size_ondisk is
3374 * bigger than it. (i.e. non-DA)
3376 if (SDFAT_I(inode
)->i_size_ondisk
>
3377 SDFAT_I(inode
)->i_size_aligned
) {
3378 SDFAT_I(inode
)->i_size_aligned
=
3379 SDFAT_I(inode
)->i_size_ondisk
;
3383 if (buffer_delay(bh_result
))
3384 clear_buffer_delay(bh_result
);
3388 if (SDFAT_I(inode
)->i_size_ondisk
>
3389 SDFAT_I(inode
)->i_size_aligned
) {
3390 /* Only after truncate
3391 * and the two size variables should indicate
3394 unsigned int blocksize
= 1 << inode
->i_blkbits
;
3395 BUG_ON(SDFAT_I(inode
)->i_size_ondisk
-
3396 SDFAT_I(inode
)->i_size_aligned
>= blocksize
);
3400 map_bh(bh_result
, sb
, phys
);
3403 bh_result
->b_size
= max_blocks
<< sb
->s_blocksize_bits
;
/*
 * sdfat_readpage(): ->readpage, thin wrapper over mpage_readpage() using
 * sdfat_get_block.  (NOTE(review): extraction-garbled text; bytes
 * unchanged.)
 */
3409 static int sdfat_readpage(struct file
*file
, struct page
*page
)
3413 ret
= mpage_readpage(page
, sdfat_get_block
);
/*
 * sdfat_readpages(): ->readpages, thin wrapper over mpage_readpages()
 * using sdfat_get_block.  (NOTE(review): extraction-garbled text; bytes
 * unchanged.)
 */
3417 static int sdfat_readpages(struct file
*file
, struct address_space
*mapping
,
3418 struct list_head
*pages
, unsigned int nr_pages
)
3422 ret
= mpage_readpages(mapping
, pages
, nr_pages
, sdfat_get_block
);
/*
 * sdfat_submit_fullpage_bio(): build and submit a one-segment write bio
 * covering `length` bytes of `page` at `sector`; completion is handled by
 * sdfat_writepage_end_io.  GFP_NOIO allocation cannot fail for a single
 * bio (mempool guarantee).  (NOTE(review): extraction-garbled text —
 * interior lines missing; code bytes kept unchanged.)
 */
3426 static inline void sdfat_submit_fullpage_bio(struct block_device
*bdev
,
3427 sector_t sector
, unsigned int length
, struct page
*page
)
3429 /* Single page bio submit */
3432 BUG_ON((length
> PAGE_SIZE
) || (length
== 0));
3435 * If __GFP_WAIT is set, then bio_alloc will always be able to allocate
3436 * a bio. This is due to the mempool guarantees. To make this work, callers
3437 * must never allocate more than 1 bio at a time from this pool.
3439 * #define GFP_NOIO (__GFP_WAIT)
3441 bio
= bio_alloc(GFP_NOIO
, 1);
3443 bio
->bi_bdev
= bdev
;
3445 bio
->bi_io_vec
[0].bv_page
= page
; /* Inline vec */
3446 bio
->bi_io_vec
[0].bv_len
= length
; /* PAGE_SIZE */
3447 bio
->bi_io_vec
[0].bv_offset
= 0;
3448 __sdfat_set_bio_iterate(bio
, sector
, length
, 0, 0);
3450 bio
->bi_end_io
= sdfat_writepage_end_io
;
3451 __sdfat_submit_bio_write(bio
);
/*
 * sdfat_writepage(): ->writepage fast path.  When the whole dirty page
 * maps to physically contiguous blocks it submits a single full-page bio
 * via sdfat_submit_fullpage_bio(); otherwise (small clusters, partial
 * pages, non-contiguous mapping, confused dirty state) it falls back to
 * block_write_full_page().  Also strips the mapped flag from buffers of
 * active defrag victims so a later write_begin re-resolves them.
 * (NOTE(review): extraction-garbled text — interior lines missing; code
 * bytes kept unchanged.)
 */
3454 static int sdfat_writepage(struct page
*page
, struct writeback_control
*wbc
)
3456 struct inode
* const inode
= page
->mapping
->host
;
3457 struct super_block
*sb
= inode
->i_sb
;
3458 loff_t i_size
= i_size_read(inode
);
3459 const pgoff_t end_index
= i_size
>> PAGE_SHIFT
;
3460 const unsigned int blocks_per_page
= PAGE_SIZE
>> inode
->i_blkbits
;
3461 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
3462 struct buffer_head
*bh
, *head
;
3463 sector_t block
, block_0
, last_phys
;
3465 unsigned int nr_blocks_towrite
= blocks_per_page
;
3467 /* Don't distinguish 0-filled/clean block.
3468 * Just write back the whole page
3470 if (fsi
->cluster_size
< PAGE_SIZE
)
3473 if (!PageUptodate(page
)) {
3474 MMSG("%s: Not up-to-date page -> block_write_full_page\n",
3479 if (page
->index
>= end_index
) {
3480 /* last page or outside i_size */
3481 unsigned int offset
= i_size
& (PAGE_SIZE
-1);
3483 /* If a truncation is in progress */
3484 if (page
->index
> end_index
|| !offset
)
3487 /* 0-fill after i_size */
3488 zero_user_segment(page
, offset
, PAGE_SIZE
);
3491 if (!page_has_buffers(page
)) {
3492 MMSG("WP: No buffers -> block_write_full_page\n");
3496 block
= (sector_t
)page
->index
<< (PAGE_SHIFT
- inode
->i_blkbits
);
3497 block_0
= block
; /* first block */
3498 head
= page_buffers(page
);
3503 BUG_ON(buffer_locked(bh
));
3505 if (!buffer_dirty(bh
) || !buffer_uptodate(bh
)) {
3506 if (nr_blocks_towrite
== blocks_per_page
)
3507 nr_blocks_towrite
= (unsigned int) (block
- block_0
);
3509 BUG_ON(nr_blocks_towrite
>= blocks_per_page
);
3511 // !uptodate but dirty??
3512 if (buffer_dirty(bh
))
3515 // Nothing to writeback in this block
3516 bh
= bh
->b_this_page
;
3521 if (nr_blocks_towrite
!= blocks_per_page
)
3522 // Dirty -> Non-dirty -> Dirty again case
3526 if (!buffer_mapped(bh
) || buffer_delay(bh
)) {
3527 BUG_ON(bh
->b_size
!= (1 << (inode
->i_blkbits
)));
3528 ret
= sdfat_get_block(inode
, block
, bh
, 1);
3532 if (buffer_new(bh
)) {
3533 clear_buffer_new(bh
);
3534 unmap_underlying_metadata(bh
->b_bdev
, bh
->b_blocknr
);
3538 /* continuity check */
3539 if (((last_phys
+ 1) != bh
->b_blocknr
) && (last_phys
!= 0)) {
3540 DMSG("Non-contiguous block mapping in single page");
3544 last_phys
= bh
->b_blocknr
;
3545 bh
= bh
->b_this_page
;
3547 } while (bh
!= head
);
3549 if (nr_blocks_towrite
== 0) {
3550 DMSG("Page dirty but no dirty bh? alloc_208\n");
3557 clear_buffer_dirty(bh
);
3558 bh
= bh
->b_this_page
;
3559 } while (bh
!= head
);
3561 BUG_ON(PageWriteback(page
));
3562 set_page_writeback(page
);
3565 * Turn off MAPPED flag in victim's bh if defrag on.
3566 * Another write_begin can starts after get_block for defrag victims called.
3567 * In this case, write_begin calls get_block and get original block number
3568 * and previous defrag will be canceled.
3570 if (unlikely(__check_dfr_on(inode
,
3571 (loff_t
)(page
->index
<< PAGE_SHIFT
),
3572 (loff_t
)((page
->index
+ 1) << PAGE_SHIFT
),
3575 clear_buffer_mapped(bh
);
3576 bh
= bh
->b_this_page
;
3577 } while (bh
!= head
);
3580 // Trace # of pages queued (Approx.)
3581 atomic_inc(&SDFAT_SB(sb
)->stat_n_pages_queued
);
3583 sdfat_submit_fullpage_bio(head
->b_bdev
,
3584 head
->b_blocknr
<< (inode
->i_blkbits
- sb
->s_blocksize_bits
),
3585 nr_blocks_towrite
<< inode
->i_blkbits
,
3593 #ifdef CONFIG_SDFAT_TRACE_IO
3594 SDFAT_SB(sb
)->stat_n_pages_confused
++;
/* fallback path for all the bail-outs above */
3596 ret
= block_write_full_page(page
, sdfat_get_block
, wbc
);
/*
 * sdfat_da_writepages(): ->writepages for the delayed-allocation aops.
 * Uses the aligned-mpage writer when the adj_req option is on, otherwise
 * generic_writepages() (which funnels into sdfat_writepage).
 * (NOTE(review): extraction-garbled text; bytes unchanged.)
 */
3600 static int sdfat_da_writepages(struct address_space
*mapping
,
3601 struct writeback_control
*wbc
)
3603 MMSG("%s(inode:%p) with nr_to_write = 0x%08lx "
3604 "(ku %d, bg %d, tag %d, rc %d )\n",
3605 __func__
, mapping
->host
, wbc
->nr_to_write
,
3606 wbc
->for_kupdate
, wbc
->for_background
, wbc
->tagged_writepages
,
3609 ASSERT(mapping
->a_ops
== &sdfat_da_aops
);
3611 #ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
3612 if (SDFAT_SB(mapping
->host
->i_sb
)->options
.adj_req
)
3613 return sdfat_mpage_writepages(mapping
, wbc
, sdfat_get_block
);
3615 return generic_writepages(mapping
, wbc
);
/*
 * sdfat_writepages(): ->writepages for the non-DA aops; same adj_req
 * dispatch as the DA variant but falls back to mpage_writepages().
 * (NOTE(review): extraction-garbled text; bytes unchanged.)
 */
3618 static int sdfat_writepages(struct address_space
*mapping
,
3619 struct writeback_control
*wbc
)
3621 MMSG("%s(inode:%p) with nr_to_write = 0x%08lx "
3622 "(ku %d, bg %d, tag %d, rc %d )\n",
3623 __func__
, mapping
->host
, wbc
->nr_to_write
,
3624 wbc
->for_kupdate
, wbc
->for_background
, wbc
->tagged_writepages
,
3627 ASSERT(mapping
->a_ops
== &sdfat_aops
);
3629 #ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
3630 if (SDFAT_SB(mapping
->host
->i_sb
)->options
.adj_req
)
3631 return sdfat_mpage_writepages(mapping
, wbc
, sdfat_get_block
);
3633 return mpage_writepages(mapping
, wbc
, sdfat_get_block
);
/*
 * sdfat_write_failed(): undo a partially-failed write that extended past
 * i_size — drop the over-extended pagecache and truncate allocation back
 * (passing i_size_aligned as the old size to sdfat_truncate()).
 * (NOTE(review): extraction-garbled text; bytes unchanged.)
 */
3636 static void sdfat_write_failed(struct address_space
*mapping
, loff_t to
)
3638 struct inode
*inode
= mapping
->host
;
3640 if (to
> i_size_read(inode
)) {
3641 __sdfat_truncate_pagecache(inode
, to
, i_size_read(inode
));
3642 sdfat_truncate(inode
, SDFAT_I(inode
)->i_size_aligned
);
/*
 * __sdfat_write_begin(): shared ->write_begin body.  Cancels defrag over
 * the written range, then cont_write_begin() with the caller-supplied
 * get_block and size cursor (`bytes`); cleans up over-extension on error
 * via sdfat_write_failed().  (NOTE(review): extraction-garbled text —
 * interior lines missing; code bytes kept unchanged.)
 */
3646 static int __sdfat_write_begin(struct file
*file
, struct address_space
*mapping
,
3647 loff_t pos
, unsigned int len
,
3648 unsigned int flags
, struct page
**pagep
,
3649 void **fsdata
, get_block_t
*get_block
,
3650 loff_t
*bytes
, const char *fname
)
3654 __cancel_dfr_work(mapping
->host
, pos
, (loff_t
)(pos
+ len
), fname
);
3657 ret
= cont_write_begin(file
, mapping
, pos
, len
, flags
, pagep
, fsdata
,
3661 sdfat_write_failed(mapping
, pos
+len
);
/*
 * sdfat_da_write_begin(): DA ->write_begin — uses sdfat_da_prep_block and
 * tracks i_size_aligned as the cont_write_begin size cursor.
 * (NOTE(review): extraction-garbled text; bytes unchanged.)
 */
3667 static int sdfat_da_write_begin(struct file
*file
, struct address_space
*mapping
,
3668 loff_t pos
, unsigned int len
, unsigned int flags
,
3669 struct page
**pagep
, void **fsdata
)
3671 return __sdfat_write_begin(file
, mapping
, pos
, len
, flags
,
3672 pagep
, fsdata
, sdfat_da_prep_block
,
3673 &SDFAT_I(mapping
->host
)->i_size_aligned
,
/*
 * sdfat_write_begin(): non-DA ->write_begin — uses sdfat_get_block and
 * tracks i_size_ondisk as the cont_write_begin size cursor.
 * (NOTE(review): extraction-garbled text; bytes unchanged.)
 */
3678 static int sdfat_write_begin(struct file
*file
, struct address_space
*mapping
,
3679 loff_t pos
, unsigned int len
, unsigned int flags
,
3680 struct page
**pagep
, void **fsdata
)
3682 return __sdfat_write_begin(file
, mapping
, pos
, len
, flags
,
3683 pagep
, fsdata
, sdfat_get_block
,
3684 &SDFAT_I(mapping
->host
)->i_size_ondisk
,
/*
 * sdfat_write_end(): ->write_end.  Completes via generic_write_end(),
 * sanity-checks i_size_aligned >= i_size, rolls back over-extension on a
 * short copy via sdfat_write_failed(), and sets the FAT archive attribute
 * plus mtime/ctime on the first successful modification.  (NOTE(review):
 * extraction-garbled text — interior lines missing; code bytes kept
 * unchanged.)
 */
3688 static int sdfat_write_end(struct file
*file
, struct address_space
*mapping
,
3689 loff_t pos
, unsigned int len
, unsigned int copied
,
3690 struct page
*pagep
, void *fsdata
)
3692 struct inode
*inode
= mapping
->host
;
3693 FILE_ID_T
*fid
= &(SDFAT_I(inode
)->fid
);
3696 err
= generic_write_end(file
, mapping
, pos
, len
, copied
, pagep
, fsdata
);
3698 /* FOR GRACEFUL ERROR HANDLING */
3699 if (SDFAT_I(inode
)->i_size_aligned
< i_size_read(inode
)) {
3700 sdfat_fs_error(inode
->i_sb
, "invalid size(size(%llu) "
3701 "> aligned(%llu)\n", i_size_read(inode
),
3702 SDFAT_I(inode
)->i_size_aligned
);
3703 sdfat_debug_bug_on(1);
3707 sdfat_write_failed(mapping
, pos
+len
);
3709 if (!(err
< 0) && !(fid
->attr
& ATTR_ARCHIVE
)) {
3710 inode
->i_mtime
= inode
->i_ctime
= CURRENT_TIME_SEC
;
3711 fid
->attr
|= ATTR_ARCHIVE
;
3712 mark_inode_dirty(inode
);
/*
 * __sdfat_direct_IO(): common direct-I/O body.  Writes past
 * i_size_aligned return 0 to fall back to buffered I/O (blockdev DIO
 * bypasses ->write_begin and would leave i_size_aligned stale); otherwise
 * delegates to __sdfat_blkdev_direct_IO() and unwinds over-extension on
 * write failure.  (NOTE(review): extraction-garbled text — interior lines
 * missing; code bytes kept unchanged.)
 */
3718 static inline ssize_t
__sdfat_direct_IO(int rw
, struct kiocb
*iocb
,
3719 struct inode
*inode
, void *iov_u
, loff_t offset
,
3720 loff_t count
, unsigned long nr_segs
)
3722 struct address_space
*mapping
= inode
->i_mapping
;
3723 loff_t size
= offset
+ count
;
3728 * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
3729 * so we need to update the ->i_size_aligned to block boundary.
3731 * But we must fill the remaining area or hole by nul for
3732 * updating ->i_size_aligned
3734 * Return 0, and fallback to normal buffered write.
3736 if (SDFAT_I(inode
)->i_size_aligned
< size
)
3741 * sdFAT need to use the DIO_LOCKING for avoiding the race
3742 * condition of sdfat_get_block() and ->truncate().
3744 ret
= __sdfat_blkdev_direct_IO(rw
, iocb
, inode
, iov_u
, offset
, nr_segs
);
3745 if (ret
< 0 && (rw
& WRITE
))
3746 sdfat_write_failed(mapping
, size
);
/*
 * Address-space operations for the normal (non-delayed-allocation) path.
 * (NOTE(review): extraction-garbled text; bytes unchanged.)
 */
3751 static const struct address_space_operations sdfat_aops
= {
3752 .readpage
= sdfat_readpage
,
3753 .readpages
= sdfat_readpages
,
3754 .writepage
= sdfat_writepage
,
3755 .writepages
= sdfat_writepages
,
3756 .write_begin
= sdfat_write_begin
,
3757 .write_end
= sdfat_write_end
,
3758 .direct_IO
= sdfat_direct_IO
,
3759 .bmap
= sdfat_aop_bmap
/*
 * Address-space operations for the delayed-allocation path — differs from
 * sdfat_aops only in writepages/write_begin.  (NOTE(review): extraction-
 * garbled text; bytes unchanged.)
 */
3762 static const struct address_space_operations sdfat_da_aops
= {
3763 .readpage
= sdfat_readpage
,
3764 .readpages
= sdfat_readpages
,
3765 .writepage
= sdfat_writepage
,
3766 .writepages
= sdfat_da_writepages
,
3767 .write_begin
= sdfat_da_write_begin
,
3768 .write_end
= sdfat_write_end
,
3769 .direct_IO
= sdfat_direct_IO
,
3770 .bmap
= sdfat_aop_bmap
3773 /*======================================================================*/
3774 /* Super Operations */
3775 /*======================================================================*/
/*
 * sdfat_hash(): hash an on-disk i_pos into the private inode hashtable
 * bucket index.  (NOTE(review): extraction-garbled text; bytes unchanged.)
 */
3777 static inline unsigned long sdfat_hash(loff_t i_pos
)
3779 return hash_32(i_pos
, SDFAT_HASH_BITS
);
/*
 * sdfat_attach(): record the inode's on-disk position and insert it into
 * the per-sb i_pos hashtable under inode_hash_lock.  (NOTE(review):
 * extraction-garbled text; bytes unchanged.)
 */
3782 static void sdfat_attach(struct inode
*inode
, loff_t i_pos
)
3784 struct sdfat_sb_info
*sbi
= SDFAT_SB(inode
->i_sb
);
3785 struct hlist_head
*head
= sbi
->inode_hashtable
+ sdfat_hash(i_pos
);
3787 spin_lock(&sbi
->inode_hash_lock
);
3788 SDFAT_I(inode
)->i_pos
= i_pos
;
3789 hlist_add_head(&SDFAT_I(inode
)->i_hash_fat
, head
);
3790 spin_unlock(&sbi
->inode_hash_lock
);
3793 static void sdfat_detach(struct inode
*inode
)
3795 struct sdfat_sb_info
*sbi
= SDFAT_SB(inode
->i_sb
);
3797 spin_lock(&sbi
->inode_hash_lock
);
3798 hlist_del_init(&SDFAT_I(inode
)->i_hash_fat
);
3799 SDFAT_I(inode
)->i_pos
= 0;
3800 spin_unlock(&sbi
->inode_hash_lock
);
3804 /* doesn't deal with root inode */
3805 static int sdfat_fill_inode(struct inode
*inode
, const FILE_ID_T
*fid
)
3807 struct sdfat_sb_info
*sbi
= SDFAT_SB(inode
->i_sb
);
3808 FS_INFO_T
*fsi
= &(sbi
->fsi
);
3810 u64 size
= fid
->size
;
3812 memcpy(&(SDFAT_I(inode
)->fid
), fid
, sizeof(FILE_ID_T
));
3814 SDFAT_I(inode
)->i_pos
= 0;
3815 SDFAT_I(inode
)->target
= NULL
;
3816 inode
->i_uid
= sbi
->options
.fs_uid
;
3817 inode
->i_gid
= sbi
->options
.fs_gid
;
3819 inode
->i_generation
= get_seconds();
3821 if (fsapi_read_inode(inode
, &info
) < 0) {
3822 MMSG("%s: failed to read stat!\n", __func__
);
3826 if (info
.Attr
& ATTR_SUBDIR
) { /* directory */
3827 inode
->i_generation
&= ~1;
3828 inode
->i_mode
= sdfat_make_mode(sbi
, info
.Attr
, S_IRWXUGO
);
3829 inode
->i_op
= &sdfat_dir_inode_operations
;
3830 inode
->i_fop
= &sdfat_dir_operations
;
3832 set_nlink(inode
, info
.NumSubdirs
);
3833 } else if (info
.Attr
& ATTR_SYMLINK
) { /* symbolic link */
3834 inode
->i_op
= &sdfat_symlink_inode_operations
;
3835 inode
->i_generation
|= 1;
3836 inode
->i_mode
= sdfat_make_mode(sbi
, info
.Attr
, S_IRWXUGO
);
3837 } else { /* regular file */
3838 inode
->i_generation
|= 1;
3839 inode
->i_mode
= sdfat_make_mode(sbi
, info
.Attr
, S_IRWXUGO
);
3840 inode
->i_op
= &sdfat_file_inode_operations
;
3841 inode
->i_fop
= &sdfat_file_operations
;
3843 if (sbi
->options
.improved_allocation
& SDFAT_ALLOC_DELAY
)
3844 inode
->i_mapping
->a_ops
= &sdfat_da_aops
;
3846 inode
->i_mapping
->a_ops
= &sdfat_aops
;
3848 inode
->i_mapping
->nrpages
= 0;
3853 * Use fid->size instead of info.Size
3854 * because info.Size means the value saved on disk
3856 i_size_write(inode
, size
);
3858 /* ondisk and aligned size should be aligned with block size */
3859 if (size
& (inode
->i_sb
->s_blocksize
- 1)) {
3860 size
|= (inode
->i_sb
->s_blocksize
- 1);
3864 SDFAT_I(inode
)->i_size_aligned
= size
;
3865 SDFAT_I(inode
)->i_size_ondisk
= size
;
3866 sdfat_debug_check_clusters(inode
);
3868 sdfat_save_attr(inode
, info
.Attr
);
3870 inode
->i_blocks
= ((i_size_read(inode
) + (fsi
->cluster_size
- 1))
3871 & ~((loff_t
)fsi
->cluster_size
- 1)) >> inode
->i_blkbits
;
3873 sdfat_time_fat2unix(sbi
, &inode
->i_mtime
, &info
.ModifyTimestamp
);
3874 sdfat_time_fat2unix(sbi
, &inode
->i_ctime
, &info
.CreateTimestamp
);
3875 sdfat_time_fat2unix(sbi
, &inode
->i_atime
, &info
.AccessTimestamp
);
3877 __init_dfr_info(inode
);
3882 static struct inode
*sdfat_build_inode(struct super_block
*sb
,
3883 const FILE_ID_T
*fid
, loff_t i_pos
) {
3884 struct inode
*inode
;
3887 inode
= sdfat_iget(sb
, i_pos
);
3890 inode
= new_inode(sb
);
3892 inode
= ERR_PTR(-ENOMEM
);
3895 inode
->i_ino
= iunique(sb
, SDFAT_ROOT_INO
);
3896 inode
->i_version
= 1;
3897 err
= sdfat_fill_inode(inode
, fid
);
3900 inode
= ERR_PTR(err
);
3903 sdfat_attach(inode
, i_pos
);
3904 insert_inode_hash(inode
);
3909 static struct inode
*sdfat_alloc_inode(struct super_block
*sb
)
3911 struct sdfat_inode_info
*ei
;
3913 ei
= kmem_cache_alloc(sdfat_inode_cachep
, GFP_NOFS
);
3916 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
3917 init_rwsem(&ei
->truncate_lock
);
3919 return &ei
->vfs_inode
;
3922 static void sdfat_destroy_inode(struct inode
*inode
)
3924 if (SDFAT_I(inode
)->target
) {
3925 kfree(SDFAT_I(inode
)->target
);
3926 SDFAT_I(inode
)->target
= NULL
;
3929 kmem_cache_free(sdfat_inode_cachep
, SDFAT_I(inode
));
3932 static int __sdfat_write_inode(struct inode
*inode
, int sync
)
3934 struct super_block
*sb
= inode
->i_sb
;
3935 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
3938 if (inode
->i_ino
== SDFAT_ROOT_INO
)
3941 info
.Attr
= sdfat_make_attr(inode
);
3942 info
.Size
= i_size_read(inode
);
3944 sdfat_time_unix2fat(sbi
, &inode
->i_mtime
, &info
.ModifyTimestamp
);
3945 sdfat_time_unix2fat(sbi
, &inode
->i_ctime
, &info
.CreateTimestamp
);
3946 sdfat_time_unix2fat(sbi
, &inode
->i_atime
, &info
.AccessTimestamp
);
3948 if (!__support_write_inode_sync(sb
))
3951 /* FIXME : Do we need handling error? */
3952 return fsapi_write_inode(inode
, &info
, sync
);
/* Synchronously write an inode's metadata to disk. */
static int sdfat_sync_inode(struct inode *inode)
{
	return __sdfat_write_inode(inode, 1);
}
3960 static int sdfat_write_inode(struct inode
*inode
, struct writeback_control
*wbc
)
3962 return __sdfat_write_inode(inode
, wbc
->sync_mode
== WB_SYNC_ALL
);
3965 static void sdfat_evict_inode(struct inode
*inode
)
3967 truncate_inode_pages(&inode
->i_data
, 0);
3969 if (!inode
->i_nlink
) {
3970 loff_t old_size
= i_size_read(inode
);
3972 i_size_write(inode
, 0);
3974 SDFAT_I(inode
)->fid
.size
= old_size
;
3976 __cancel_dfr_work(inode
, 0, (loff_t
)old_size
, __func__
);
3978 /* TO CHECK evicting directory works correctly */
3979 MMSG("%s: inode(%p) evict %s (size(%llu) to zero)\n",
3981 S_ISDIR(inode
->i_mode
) ? "directory" : "file",
3983 fsapi_truncate(inode
, old_size
, 0);
3986 invalidate_inode_buffers(inode
);
3988 fsapi_invalidate_extent(inode
);
3989 sdfat_detach(inode
);
3991 /* after end of this function, caller will remove inode hash */
3992 /* remove_inode_hash(inode); */
3997 static void sdfat_put_super(struct super_block
*sb
)
3999 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4002 sdfat_log_msg(sb
, KERN_INFO
, "trying to unmount...");
4004 __cancel_delayed_work_sync(sbi
);
4006 if (__is_sb_dirty(sb
))
4007 sdfat_write_super(sb
);
4009 __free_dfr_mem_if_required(sb
);
4010 err
= fsapi_umount(sb
);
4012 if (sbi
->nls_disk
) {
4013 unload_nls(sbi
->nls_disk
);
4014 sbi
->nls_disk
= NULL
;
4015 sbi
->options
.codepage
= sdfat_default_codepage
;
4018 unload_nls(sbi
->nls_io
);
4021 if (sbi
->options
.iocharset
!= sdfat_default_iocharset
) {
4022 kfree(sbi
->options
.iocharset
);
4023 sbi
->options
.iocharset
= sdfat_default_iocharset
;
4026 sb
->s_fs_info
= NULL
;
4028 kobject_del(&sbi
->sb_kobj
);
4029 kobject_put(&sbi
->sb_kobj
);
4030 if (!sbi
->use_vmalloc
)
4035 sdfat_log_msg(sb
, KERN_INFO
, "unmounted successfully! %s",
4036 err
? "(with previous I/O errors)" : "");
4039 static inline void __flush_delayed_meta(struct super_block
*sb
, s32 sync
)
4041 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
4042 fsapi_cache_flush(sb
, sync
);
4048 static void sdfat_write_super(struct super_block
*sb
)
4056 #ifdef CONFIG_SDFAT_DFR
4057 if (atomic_read(&(SDFAT_SB(sb
)->dfr_info
.stat
)) == DFR_SB_STAT_VALID
)
4058 fsapi_dfr_update_fat_next(sb
);
4061 /* flush delayed FAT/DIR dirty */
4062 __flush_delayed_meta(sb
, 0);
4064 if (!(sb
->s_flags
& MS_RDONLY
))
4065 fsapi_sync_fs(sb
, 0);
4071 /* Issuing bdev requests is needed
4072 * to guarantee DIR updates in time
4073 * whether w/ or w/o delayed DIR dirty feature.
4074 * (otherwise DIR updates could be delayed for 5 + 5 secs at max.)
4076 sync_blockdev(sb
->s_bdev
);
4078 #if (defined(CONFIG_SDFAT_DFR) && defined(CONFIG_SDFAT_DFR_DEBUG))
4080 fsapi_dfr_spo_test(sb
, DFR_SPO_FAT_NEXT
, __func__
);
4082 MMSG("BD: sdfat_write_super (bdev_sync for %ld ms)\n",
4083 (jiffies
- time
) * 1000 / HZ
);
/* Advance defrag FAT state when a defrag session is active; no-op otherwise. */
static void __dfr_update_fat_next(struct super_block *sb)
{
#ifdef CONFIG_SDFAT_DFR
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);

	if (sbi->options.defrag &&
	    (atomic_read(&sbi->dfr_info.stat) == DFR_SB_STAT_VALID)) {
		fsapi_dfr_update_fat_next(sb);
	}
#endif
}
/*
 * Complete a pending defrag request (update FAT, flush, wake waiters),
 * or — at most once per DFR_DEFAULT_TIMEOUT — ask the defrag daemon
 * (via uevent) whether defragmentation is required.
 *
 * NOTE(review): the lock/unlock, timestamp update, and uevent guard were
 * lost in extraction and reconstructed from upstream sdfat — verify.
 */
static void __dfr_update_fat_prev(struct super_block *sb, int wait)
{
#ifdef CONFIG_SDFAT_DFR
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	struct defrag_info *sb_dfr = &sbi->dfr_info;
	/* static time available? */
	static int time; /* initialized by zero */
	int uevent = 0, total = 0, clean = 0, full = 0;
	int spent = jiffies - time;

	if (!(sbi->options.defrag && wait))
		return;

	__lock_super(sb);

	/* Update FAT for defrag */
	if (atomic_read(&(sbi->dfr_info.stat)) == DFR_SB_STAT_VALID) {

		fsapi_dfr_update_fat_prev(sb, 0);

		/* flush delayed FAT/DIR dirty */
		__flush_delayed_meta(sb, 0);

		/* Complete defrag req */
		fsapi_sync_fs(sb, 1);
		atomic_set(&sb_dfr->stat, DFR_SB_STAT_REQ);
		complete_all(&sbi->dfr_complete);
	} else if (((spent < 0) || (spent > DFR_DEFAULT_TIMEOUT)) &&
		(atomic_read(&(sbi->dfr_info.stat)) == DFR_SB_STAT_IDLE)) {
		uevent = fsapi_dfr_check_dfr_required(sb, &total, &clean,
							&full);
		time = jiffies;
	}

	__unlock_super(sb);

	if (uevent) {
		kobject_uevent(&SDFAT_SB(sb)->sb_kobj, KOBJ_CHANGE);
		dfr_debug("uevent for defrag_daemon, total_au %d, "
				"clean_au %d, full_au %d", total, clean, full);
	}
#endif
}
/*
 * super_operations.sync_fs: flush dirty state through the fs core and
 * let the defrag engine complete any pending request.
 *
 * NOTE(review): the err declaration and lock/clean/unlock statements
 * were lost in extraction and reconstructed from upstream sdfat — verify.
 */
static int sdfat_sync_fs(struct super_block *sb, int wait)
{
	int err = 0;

	/* If there are some dirty buffers in the bdev inode */
	if (__is_sb_dirty(sb)) {
		__lock_super(sb);
		__set_sb_clean(sb);

		__dfr_update_fat_next(sb);

		err = fsapi_sync_fs(sb, 1);

#if (defined(CONFIG_SDFAT_DFR) && defined(CONFIG_SDFAT_DFR_DEBUG))
		/* SPO test */
		fsapi_dfr_spo_test(sb, DFR_SPO_FAT_NEXT, __func__);
#endif

		__unlock_super(sb);
	}

	__dfr_update_fat_prev(sb, wait);

	return err;
}
4166 static int sdfat_statfs(struct dentry
*dentry
, struct kstatfs
*buf
)
4170 * fixed the slow-call problem because of volume-lock contention.
4172 struct super_block
*sb
= dentry
->d_sb
;
4173 u64 id
= huge_encode_dev(sb
->s_bdev
->bd_dev
);
4174 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
4177 /* fsapi_statfs will try to get a volume lock if needed */
4178 if (fsapi_statfs(sb
, &info
))
4182 sdfat_msg(sb
, KERN_INFO
, "called statfs with previous"
4183 " I/O error(0x%02X).", fsi
->prev_eio
);
4185 buf
->f_type
= sb
->s_magic
;
4186 buf
->f_bsize
= info
.ClusterSize
;
4187 buf
->f_blocks
= info
.NumClusters
;
4188 buf
->f_bfree
= info
.FreeClusters
;
4189 buf
->f_bavail
= info
.FreeClusters
;
4190 buf
->f_fsid
.val
[0] = (u32
)id
;
4191 buf
->f_fsid
.val
[1] = (u32
)(id
>> 32);
4192 buf
->f_namelen
= 260;
4197 static int sdfat_remount(struct super_block
*sb
, int *flags
, char *data
)
4199 unsigned long prev_sb_flags
;
4200 char *orig_data
= kstrdup(data
, GFP_KERNEL
);
4201 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4202 FS_INFO_T
*fsi
= &(sbi
->fsi
);
4204 *flags
|= MS_NODIRATIME
;
4206 prev_sb_flags
= sb
->s_flags
;
4208 sdfat_remount_syncfs(sb
);
4210 fsapi_set_vol_flags(sb
, VOL_CLEAN
, 1);
4212 sdfat_log_msg(sb
, KERN_INFO
, "re-mounted(%s->%s), eio=0x%x, Opts: %s",
4213 (prev_sb_flags
& MS_RDONLY
) ? "ro" : "rw",
4214 (*flags
& MS_RDONLY
) ? "ro" : "rw",
4215 fsi
->prev_eio
, orig_data
);
4220 static int __sdfat_show_options(struct seq_file
*m
, struct super_block
*sb
)
4222 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4223 struct sdfat_mount_options
*opts
= &sbi
->options
;
4224 FS_INFO_T
*fsi
= &(sbi
->fsi
);
4226 /* Show partition info */
4227 seq_printf(m
, ",fs=%s", sdfat_get_vol_type_str(fsi
->vol_type
));
4229 seq_printf(m
, ",eio=0x%x", fsi
->prev_eio
);
4230 if (!uid_eq(opts
->fs_uid
, GLOBAL_ROOT_UID
))
4231 seq_printf(m
, ",uid=%u",
4232 from_kuid_munged(&init_user_ns
, opts
->fs_uid
));
4233 if (!gid_eq(opts
->fs_gid
, GLOBAL_ROOT_GID
))
4234 seq_printf(m
, ",gid=%u",
4235 from_kgid_munged(&init_user_ns
, opts
->fs_gid
));
4236 seq_printf(m
, ",fmask=%04o", opts
->fs_fmask
);
4237 seq_printf(m
, ",dmask=%04o", opts
->fs_dmask
);
4238 if (opts
->allow_utime
)
4239 seq_printf(m
, ",allow_utime=%04o", opts
->allow_utime
);
4241 seq_printf(m
, ",codepage=%s", sbi
->nls_disk
->charset
);
4243 seq_printf(m
, ",iocharset=%s", sbi
->nls_io
->charset
);
4245 seq_puts(m
, ",utf8");
4246 if (sbi
->fsi
.vol_type
!= EXFAT
)
4247 seq_puts(m
, ",shortname=winnt");
4248 seq_printf(m
, ",namecase=%u", opts
->casesensitive
);
4250 seq_puts(m
, ",tz=UTC");
4251 if (opts
->improved_allocation
& SDFAT_ALLOC_DELAY
)
4252 seq_puts(m
, ",delay");
4253 if (opts
->improved_allocation
& SDFAT_ALLOC_SMART
)
4254 seq_printf(m
, ",smart,ausize=%u", opts
->amap_opt
.sect_per_au
);
4256 seq_puts(m
, ",defrag");
4257 if (opts
->adj_hidsect
)
4258 seq_puts(m
, ",adj_hid");
4260 seq_puts(m
, ",adj_req");
4261 seq_printf(m
, ",symlink=%u", opts
->symlink
);
4262 seq_printf(m
, ",bps=%ld", sb
->s_blocksize
);
4263 if (opts
->errors
== SDFAT_ERRORS_CONT
)
4264 seq_puts(m
, ",errors=continue");
4265 else if (opts
->errors
== SDFAT_ERRORS_PANIC
)
4266 seq_puts(m
, ",errors=panic");
4268 seq_puts(m
, ",errors=remount-ro");
4270 seq_puts(m
, ",discard");
4275 static const struct super_operations sdfat_sops
= {
4276 .alloc_inode
= sdfat_alloc_inode
,
4277 .destroy_inode
= sdfat_destroy_inode
,
4278 .write_inode
= sdfat_write_inode
,
4279 .evict_inode
= sdfat_evict_inode
,
4280 .put_super
= sdfat_put_super
,
4281 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
4282 .write_super
= sdfat_write_super
,
4284 .sync_fs
= sdfat_sync_fs
,
4285 .statfs
= sdfat_statfs
,
4286 .remount_fs
= sdfat_remount
,
4287 .show_options
= sdfat_show_options
,
4290 /*======================================================================*/
4291 /* SYSFS Operations */
4292 /*======================================================================*/
4293 #define SDFAT_ATTR(name, mode, show, store) \
4294 static struct sdfat_attr sdfat_attr_##name = __ATTR(name, mode, show, store)
4297 struct attribute attr
;
4298 ssize_t (*show
)(struct sdfat_sb_info
*, char *);
4299 ssize_t (*store
)(struct sdfat_sb_info
*, const char *, size_t);
4302 static ssize_t
sdfat_attr_show(struct kobject
*kobj
, struct attribute
*attr
, char *buf
)
4304 struct sdfat_sb_info
*sbi
= container_of(kobj
, struct sdfat_sb_info
, sb_kobj
);
4305 struct sdfat_attr
*a
= container_of(attr
, struct sdfat_attr
, attr
);
4307 return a
->show
? a
->show(sbi
, buf
) : 0;
4310 static ssize_t
sdfat_attr_store(struct kobject
*kobj
, struct attribute
*attr
,
4311 const char *buf
, size_t len
)
4313 struct sdfat_sb_info
*sbi
= container_of(kobj
, struct sdfat_sb_info
, sb_kobj
);
4314 struct sdfat_attr
*a
= container_of(attr
, struct sdfat_attr
, attr
);
4316 return a
->store
? a
->store(sbi
, buf
, len
) : len
;
4319 static const struct sysfs_ops sdfat_attr_ops
= {
4320 .show
= sdfat_attr_show
,
4321 .store
= sdfat_attr_store
,
4325 static ssize_t
type_show(struct sdfat_sb_info
*sbi
, char *buf
)
4327 FS_INFO_T
*fsi
= &(sbi
->fsi
);
4329 return snprintf(buf
, PAGE_SIZE
, "%s\n", sdfat_get_vol_type_str(fsi
->vol_type
));
4331 SDFAT_ATTR(type
, 0444, type_show
, NULL
);
4333 static ssize_t
eio_show(struct sdfat_sb_info
*sbi
, char *buf
)
4335 FS_INFO_T
*fsi
= &(sbi
->fsi
);
4337 return snprintf(buf
, PAGE_SIZE
, "0x%x\n", fsi
->prev_eio
);
4339 SDFAT_ATTR(eio
, 0444, eio_show
, NULL
);
4341 static ssize_t
fratio_show(struct sdfat_sb_info
*sbi
, char *buf
)
4343 unsigned int n_total_au
= 0;
4344 unsigned int n_clean_au
= 0;
4345 unsigned int n_full_au
= 0;
4346 unsigned int n_dirty_au
= 0;
4347 unsigned int fr
= 0;
4349 n_total_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_TOTAL
);
4350 n_clean_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_CLEAN
);
4351 n_full_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_FULL
);
4352 n_dirty_au
= n_total_au
- (n_full_au
+ n_clean_au
);
4356 else if (!n_clean_au
)
4359 fr
= (n_dirty_au
* 100) / (n_clean_au
+ n_dirty_au
);
4361 return snprintf(buf
, PAGE_SIZE
, "%u\n", fr
);
4363 SDFAT_ATTR(fratio
, 0444, fratio_show
, NULL
);
4365 static ssize_t
totalau_show(struct sdfat_sb_info
*sbi
, char *buf
)
4367 unsigned int n_au
= 0;
4369 n_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_TOTAL
);
4370 return snprintf(buf
, PAGE_SIZE
, "%u\n", n_au
);
4372 SDFAT_ATTR(totalau
, 0444, totalau_show
, NULL
);
4374 static ssize_t
cleanau_show(struct sdfat_sb_info
*sbi
, char *buf
)
4376 unsigned int n_clean_au
= 0;
4378 n_clean_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_CLEAN
);
4379 return snprintf(buf
, PAGE_SIZE
, "%u\n", n_clean_au
);
4381 SDFAT_ATTR(cleanau
, 0444, cleanau_show
, NULL
);
4383 static ssize_t
fullau_show(struct sdfat_sb_info
*sbi
, char *buf
)
4385 unsigned int n_full_au
= 0;
4387 n_full_au
= fsapi_get_au_stat(sbi
->host_sb
, VOL_AU_STAT_FULL
);
4388 return snprintf(buf
, PAGE_SIZE
, "%u\n", n_full_au
);
4390 SDFAT_ATTR(fullau
, 0444, fullau_show
, NULL
);
4392 static struct attribute
*sdfat_attrs
[] = {
4393 &sdfat_attr_type
.attr
,
4394 &sdfat_attr_eio
.attr
,
4395 &sdfat_attr_fratio
.attr
,
4396 &sdfat_attr_totalau
.attr
,
4397 &sdfat_attr_cleanau
.attr
,
4398 &sdfat_attr_fullau
.attr
,
4402 static struct kobj_type sdfat_ktype
= {
4403 .default_attrs
= sdfat_attrs
,
4404 .sysfs_ops
= &sdfat_attr_ops
,
4407 static ssize_t
version_show(struct kobject
*kobj
,
4408 struct kobj_attribute
*attr
, char *buff
)
4410 return snprintf(buff
, PAGE_SIZE
, "FS Version %s\n", SDFAT_VERSION
);
4413 static struct kobj_attribute version_attr
= __ATTR_RO(version
);
4415 static struct attribute
*attributes
[] = {
4420 static struct attribute_group attr_group
= {
4421 .attrs
= attributes
,
4424 /*======================================================================*/
4425 /* Super Block Read Operations */
4426 /*======================================================================*/
4457 static const match_table_t sdfat_tokens
= {
4458 {Opt_uid
, "uid=%u"},
4459 {Opt_gid
, "gid=%u"},
4460 {Opt_umask
, "umask=%o"},
4461 {Opt_dmask
, "dmask=%o"},
4462 {Opt_fmask
, "fmask=%o"},
4463 {Opt_allow_utime
, "allow_utime=%o"},
4464 {Opt_codepage
, "codepage=%u"},
4465 {Opt_charset
, "iocharset=%s"},
4467 {Opt_namecase
, "namecase=%u"},
4468 {Opt_tz_utc
, "tz=UTC"},
4469 {Opt_adj_hidsect
, "adj_hid"},
4470 {Opt_delay
, "delay"},
4471 {Opt_smart
, "smart"},
4472 {Opt_ausize
, "ausize=%u"},
4473 {Opt_packing
, "packing=%u"},
4474 {Opt_defrag
, "defrag"},
4475 {Opt_symlink
, "symlink=%u"},
4476 {Opt_debug
, "debug"},
4477 {Opt_err_cont
, "errors=continue"},
4478 {Opt_err_panic
, "errors=panic"},
4479 {Opt_err_ro
, "errors=remount-ro"},
4480 {Opt_discard
, "discard"},
4482 {Opt_adj_req
, "adj_req"},
4486 static int parse_options(struct super_block
*sb
, char *options
, int silent
,
4487 int *debug
, struct sdfat_mount_options
*opts
)
4490 substring_t args
[MAX_OPT_ARGS
];
4494 opts
->fs_uid
= current_uid();
4495 opts
->fs_gid
= current_gid();
4496 opts
->fs_fmask
= opts
->fs_dmask
= current
->fs
->umask
;
4497 opts
->allow_utime
= (unsigned short) -1;
4498 opts
->codepage
= sdfat_default_codepage
;
4499 opts
->iocharset
= sdfat_default_iocharset
;
4500 opts
->casesensitive
= 0;
4502 opts
->adj_hidsect
= 0;
4504 opts
->improved_allocation
= 0;
4505 opts
->amap_opt
.pack_ratio
= 0; // Default packing
4506 opts
->amap_opt
.sect_per_au
= 0;
4507 opts
->amap_opt
.misaligned_sect
= 0;
4509 opts
->errors
= SDFAT_ERRORS_RO
;
4516 while ((p
= strsep(&options
, ",")) != NULL
) {
4521 token
= match_token(p
, sdfat_tokens
, args
);
4524 if (match_int(&args
[0], &option
))
4526 opts
->fs_uid
= make_kuid(current_user_ns(), option
);
4529 if (match_int(&args
[0], &option
))
4531 opts
->fs_gid
= make_kgid(current_user_ns(), option
);
4536 if (match_octal(&args
[0], &option
))
4538 if (token
!= Opt_dmask
)
4539 opts
->fs_fmask
= option
;
4540 if (token
!= Opt_fmask
)
4541 opts
->fs_dmask
= option
;
4543 case Opt_allow_utime
:
4544 if (match_octal(&args
[0], &option
))
4546 opts
->allow_utime
= option
& (S_IWGRP
| S_IWOTH
);
4549 if (match_int(&args
[0], &option
))
4551 opts
->codepage
= option
;
4554 if (opts
->iocharset
!= sdfat_default_iocharset
)
4555 kfree(opts
->iocharset
);
4556 tmpstr
= match_strdup(&args
[0]);
4559 opts
->iocharset
= tmpstr
;
4562 if (match_int(&args
[0], &option
))
4564 opts
->casesensitive
= (option
> 0) ? 1:0;
4569 case Opt_adj_hidsect
:
4570 opts
->adj_hidsect
= 1;
4576 if (match_int(&args
[0], &option
))
4578 opts
->symlink
= option
> 0 ? 1 : 0;
4581 opts
->improved_allocation
|= SDFAT_ALLOC_DELAY
;
4584 opts
->improved_allocation
|= SDFAT_ALLOC_SMART
;
4587 if (match_int(&args
[0], &option
))
4589 if (!is_power_of_2(option
))
4591 opts
->amap_opt
.sect_per_au
= option
;
4592 IMSG("set AU size by option : %u sectors\n", option
);
4595 if (match_int(&args
[0], &option
))
4597 opts
->amap_opt
.pack_ratio
= option
;
4600 #ifdef CONFIG_SDFAT_DFR
4603 IMSG("defragmentation config is not enabled. ignore\n");
4607 opts
->errors
= SDFAT_ERRORS_CONT
;
4610 opts
->errors
= SDFAT_ERRORS_PANIC
;
4613 opts
->errors
= SDFAT_ERRORS_RO
;
4622 tmpstr
= match_strdup(&args
[0]);
4625 for (i
= 0; i
< FS_TYPE_MAX
; i
++) {
4626 if (!strcmp(tmpstr
, FS_TYPE_STR
[i
])) {
4627 opts
->fs_type
= (unsigned char)i
;
4628 sdfat_log_msg(sb
, KERN_ERR
,
4629 "set fs-type by option : %s",
4635 if (i
== FS_TYPE_MAX
) {
4636 sdfat_log_msg(sb
, KERN_ERR
,
4638 "only allow auto, exfat, vfat");
4643 #ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
4646 IMSG("adjust request config is not enabled. ignore\n");
4651 sdfat_msg(sb
, KERN_ERR
,
4652 "unrecognized mount option \"%s\" "
4653 "or missing value", p
);
4660 if (opts
->allow_utime
== (unsigned short) -1)
4661 opts
->allow_utime
= ~opts
->fs_dmask
& (S_IWGRP
| S_IWOTH
);
4663 if (opts
->utf8
&& strcmp(opts
->iocharset
, sdfat_iocharset_with_utf8
)) {
4664 sdfat_msg(sb
, KERN_WARNING
,
4665 "utf8 enabled, \"iocharset=%s\" is recommended",
4666 sdfat_iocharset_with_utf8
);
4669 if (opts
->discard
) {
4670 struct request_queue
*q
= bdev_get_queue(sb
->s_bdev
);
4672 if (!blk_queue_discard(q
))
4673 sdfat_msg(sb
, KERN_WARNING
,
4674 "mounting with \"discard\" option, but "
4675 "the device does not support discard");
4682 static void sdfat_hash_init(struct super_block
*sb
)
4684 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4687 spin_lock_init(&sbi
->inode_hash_lock
);
4688 for (i
= 0; i
< SDFAT_HASH_SIZE
; i
++)
4689 INIT_HLIST_HEAD(&sbi
->inode_hashtable
[i
]);
4692 static int sdfat_read_root(struct inode
*inode
)
4694 struct super_block
*sb
= inode
->i_sb
;
4695 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
4697 FS_INFO_T
*fsi
= &(sbi
->fsi
);
4700 ts
= CURRENT_TIME_SEC
;
4702 SDFAT_I(inode
)->fid
.dir
.dir
= fsi
->root_dir
;
4703 SDFAT_I(inode
)->fid
.dir
.flags
= 0x01;
4704 SDFAT_I(inode
)->fid
.entry
= -1;
4705 SDFAT_I(inode
)->fid
.start_clu
= fsi
->root_dir
;
4706 SDFAT_I(inode
)->fid
.flags
= 0x01;
4707 SDFAT_I(inode
)->fid
.type
= TYPE_DIR
;
4708 SDFAT_I(inode
)->fid
.version
= 0;
4709 SDFAT_I(inode
)->fid
.rwoffset
= 0;
4710 SDFAT_I(inode
)->fid
.hint_bmap
.off
= -1;
4711 SDFAT_I(inode
)->fid
.hint_stat
.eidx
= 0;
4712 SDFAT_I(inode
)->fid
.hint_stat
.clu
= fsi
->root_dir
;
4713 SDFAT_I(inode
)->fid
.hint_femp
.eidx
= -1;
4715 SDFAT_I(inode
)->target
= NULL
;
4717 if (fsapi_read_inode(inode
, &info
) < 0)
4720 inode
->i_uid
= sbi
->options
.fs_uid
;
4721 inode
->i_gid
= sbi
->options
.fs_gid
;
4723 inode
->i_generation
= 0;
4724 inode
->i_mode
= sdfat_make_mode(sbi
, ATTR_SUBDIR
, S_IRWXUGO
);
4725 inode
->i_op
= &sdfat_dir_inode_operations
;
4726 inode
->i_fop
= &sdfat_dir_operations
;
4728 i_size_write(inode
, info
.Size
);
4729 SDFAT_I(inode
)->fid
.size
= info
.Size
;
4730 inode
->i_blocks
= ((i_size_read(inode
) + (fsi
->cluster_size
- 1))
4731 & ~((loff_t
)fsi
->cluster_size
- 1)) >> inode
->i_blkbits
;
4732 SDFAT_I(inode
)->i_pos
= ((loff_t
) fsi
->root_dir
<< 32) | 0xffffffff;
4733 SDFAT_I(inode
)->i_size_aligned
= i_size_read(inode
);
4734 SDFAT_I(inode
)->i_size_ondisk
= i_size_read(inode
);
4736 sdfat_save_attr(inode
, ATTR_SUBDIR
);
4737 inode
->i_mtime
= inode
->i_atime
= inode
->i_ctime
= ts
;
4738 set_nlink(inode
, info
.NumSubdirs
+ 2);
4744 static void setup_dops(struct super_block
*sb
)
4746 if (SDFAT_SB(sb
)->options
.casesensitive
== 0)
4747 sb
->s_d_op
= &sdfat_ci_dentry_ops
;
4749 sb
->s_d_op
= &sdfat_dentry_ops
;
4752 static int sdfat_fill_super(struct super_block
*sb
, void *data
, int silent
)
4754 struct inode
*root_inode
= NULL
;
4755 struct sdfat_sb_info
*sbi
;
4759 struct block_device
*bdev
= sb
->s_bdev
;
4760 dev_t bd_dev
= bdev
? bdev
->bd_dev
: 0;
4762 sdfat_log_msg(sb
, KERN_INFO
, "trying to mount...");
4765 * GFP_KERNEL is ok here, because while we do hold the
4766 * supeblock lock, memory pressure can't call back into
4767 * the filesystem, since we're only just about to mount
4768 * it and have no inodes etc active!
4770 sbi
= kzalloc(sizeof(struct sdfat_sb_info
), GFP_KERNEL
);
4772 sdfat_log_msg(sb
, KERN_INFO
,
4773 "trying to alloc sbi with vzalloc()");
4774 sbi
= vzalloc(sizeof(struct sdfat_sb_info
));
4776 sdfat_log_msg(sb
, KERN_ERR
, "failed to mount! (ENOMEM)");
4779 sbi
->use_vmalloc
= 1;
4782 mutex_init(&sbi
->s_vlock
);
4783 sb
->s_fs_info
= sbi
;
4784 sb
->s_flags
|= MS_NODIRATIME
;
4785 sb
->s_magic
= SDFAT_SUPER_MAGIC
;
4786 sb
->s_op
= &sdfat_sops
;
4787 ratelimit_state_init(&sbi
->ratelimit
, DEFAULT_RATELIMIT_INTERVAL
,
4788 DEFAULT_RATELIMIT_BURST
);
4789 err
= parse_options(sb
, data
, silent
, &debug
, &sbi
->options
);
4791 sdfat_log_msg(sb
, KERN_ERR
, "failed to parse options");
4795 setup_sdfat_xattr_handler(sb
);
4796 setup_sdfat_sync_super_wq(sb
);
4799 err
= fsapi_mount(sb
);
4801 sdfat_log_msg(sb
, KERN_ERR
, "failed to recognize fat type");
4805 /* set up enough so that it can read an inode */
4806 sdfat_hash_init(sb
);
4809 * The low byte of FAT's first entry must have same value with
4810 * media-field. But in real world, too many devices is
4811 * writing wrong value. So, removed that validity check.
4813 * if (FAT_FIRST_ENT(sb, media) != first)
4817 sprintf(buf
, "cp%d", sbi
->options
.codepage
);
4818 sbi
->nls_disk
= load_nls(buf
);
4819 if (!sbi
->nls_disk
) {
4820 sdfat_log_msg(sb
, KERN_ERR
, "codepage %s not found", buf
);
4824 sbi
->nls_io
= load_nls(sbi
->options
.iocharset
);
4826 sdfat_log_msg(sb
, KERN_ERR
, "IO charset %s not found",
4827 sbi
->options
.iocharset
);
4831 err
= __alloc_dfr_mem_if_required(sb
);
4833 sdfat_log_msg(sb
, KERN_ERR
, "failed to initialize a memory for "
4839 root_inode
= new_inode(sb
);
4841 sdfat_log_msg(sb
, KERN_ERR
, "failed to allocate root inode.");
4845 root_inode
->i_ino
= SDFAT_ROOT_INO
;
4846 root_inode
->i_version
= 1;
4848 err
= sdfat_read_root(root_inode
);
4850 sdfat_log_msg(sb
, KERN_ERR
, "failed to initialize root inode.");
4854 sdfat_attach(root_inode
, SDFAT_I(root_inode
)->i_pos
);
4855 insert_inode_hash(root_inode
);
4858 sb
->s_root
= __d_make_root(root_inode
);
4860 sdfat_msg(sb
, KERN_ERR
, "failed to get the root dentry");
4865 * Initialize filesystem attributes (for sysfs)
4866 * ex: /sys/fs/sdfat/mmcblk1[179:17]
4868 sbi
->sb_kobj
.kset
= sdfat_kset
;
4869 err
= kobject_init_and_add(&sbi
->sb_kobj
, &sdfat_ktype
, NULL
,
4870 "%s[%d:%d]", sb
->s_id
, MAJOR(bd_dev
), MINOR(bd_dev
));
4872 sdfat_msg(sb
, KERN_ERR
, "Unable to create sdfat attributes for"
4873 " %s[%d:%d](%d)", sb
->s_id
,
4874 MAJOR(bd_dev
), MINOR(bd_dev
), err
);
4878 sdfat_log_msg(sb
, KERN_INFO
, "mounted successfully!");
4880 sdfat_statistics_set_mnt(&sbi
->fsi
);
4881 sdfat_statistics_set_vol_size(sb
);
4885 __free_dfr_mem_if_required(sb
);
4889 sdfat_log_msg(sb
, KERN_INFO
, "failed to mount! (%d)", err
);
4896 unload_nls(sbi
->nls_io
);
4898 unload_nls(sbi
->nls_disk
);
4899 if (sbi
->options
.iocharset
!= sdfat_default_iocharset
)
4900 kfree(sbi
->options
.iocharset
);
4901 sb
->s_fs_info
= NULL
;
4902 if (!sbi
->use_vmalloc
)
4909 static struct dentry
*sdfat_fs_mount(struct file_system_type
*fs_type
,
4910 int flags
, const char *dev_name
, void *data
) {
4911 return mount_bdev(fs_type
, flags
, dev_name
, data
, sdfat_fill_super
);
4914 static void init_once(void *foo
)
4916 struct sdfat_inode_info
*ei
= (struct sdfat_inode_info
*)foo
;
4918 INIT_HLIST_NODE(&ei
->i_hash_fat
);
4919 inode_init_once(&ei
->vfs_inode
);
4922 static int __init
sdfat_init_inodecache(void)
4924 sdfat_inode_cachep
= kmem_cache_create("sdfat_inode_cache",
4925 sizeof(struct sdfat_inode_info
),
4926 0, (SLAB_RECLAIM_ACCOUNT
|SLAB_MEM_SPREAD
),
4928 if (!sdfat_inode_cachep
)
4933 static void sdfat_destroy_inodecache(void)
4935 kmem_cache_destroy(sdfat_inode_cachep
);
#ifdef CONFIG_SDFAT_DBG_IOCTL
/*
 * Debug kill_sb: optionally drop all device cache (simulating sudden
 * device removal) before the normal kill_block_super teardown.
 */
static void sdfat_debug_kill_sb(struct super_block *sb)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	struct block_device *bdev = sb->s_bdev;
	long flags;

	if (sbi) {
		flags = sbi->debug_flags;

		if (flags & SDFAT_DEBUGFLAGS_INVALID_UMOUNT) {
			/* invalidate_bdev drops all device cache include dirty.
			 * we use this to simulate device removal
			 */
			fsapi_cache_release(sb);
			invalidate_bdev(bdev);
		}
	}

	kill_block_super(sb);
}
#endif /* CONFIG_SDFAT_DBG_IOCTL */
4962 static struct file_system_type sdfat_fs_type
= {
4963 .owner
= THIS_MODULE
,
4965 .mount
= sdfat_fs_mount
,
4966 #ifdef CONFIG_SDFAT_DBG_IOCTL
4967 .kill_sb
= sdfat_debug_kill_sb
,
4969 .kill_sb
= kill_block_super
,
4970 #endif /* CONFIG_SDFAT_DBG_IOCTL */
4971 .fs_flags
= FS_REQUIRES_DEV
,
4974 static int __init
init_sdfat_fs(void)
4978 sdfat_log_version();
4983 sdfat_kset
= kset_create_and_add("sdfat", NULL
, fs_kobj
);
4985 pr_err("[SDFAT] failed to create fs_kobj\n");
4990 err
= sysfs_create_group(&sdfat_kset
->kobj
, &attr_group
);
4992 pr_err("[SDFAT] failed to create sdfat version attributes\n");
4996 err
= sdfat_statistics_init(sdfat_kset
);
5000 err
= sdfat_init_inodecache();
5002 pr_err("[SDFAT] failed to initialize inode cache\n");
5006 err
= register_filesystem(&sdfat_fs_type
);
5008 pr_err("[SDFAT] failed to register filesystem\n");
5014 sdfat_statistics_uninit();
5017 sysfs_remove_group(&sdfat_kset
->kobj
, &attr_group
);
5018 kset_unregister(sdfat_kset
);
5022 sdfat_destroy_inodecache();
5025 pr_err("[SDFAT] failed to initialize FS driver(err:%d)\n", err
);
5029 static void __exit
exit_sdfat_fs(void)
5031 sdfat_statistics_uninit();
5034 sysfs_remove_group(&sdfat_kset
->kobj
, &attr_group
);
5035 kset_unregister(sdfat_kset
);
5039 sdfat_destroy_inodecache();
5040 unregister_filesystem(&sdfat_fs_type
);
5045 module_init(init_sdfat_fs
);
5046 module_exit(exit_sdfat_fs
);
5048 MODULE_LICENSE("GPL");
5049 MODULE_DESCRIPTION("FAT/exFAT filesystem support");
5050 MODULE_AUTHOR("Samsung Electronics Co., Ltd.");