config SDFAT_ALIGNED_MPAGE_WRITE
bool "Enable supporting aligned mpage_write"
- default y
+ default y if SDFAT_FS=y
+ default n if SDFAT_FS=m
depends on SDFAT_FS
config SDFAT_VIRTUAL_XATTR
bool "Virtual xattr support for sdFAT"
- default y
+ default n
depends on SDFAT_FS
help
- To support virtual xattr.
+ If you enable this feature, virtual xattr is supported.
+ This feature will be deprecated because it may be replaced by
+ the "context" mount option.
config SDFAT_VIRTUAL_XATTR_SELINUX_LABEL
string "Default string for SELinux label"
}
}
- if ((PACKING_HARDLIMIT) && amap->n_need_packing >= PACKING_HARDLIMIT) {
+ if ((PACKING_HARDLIMIT != 0) &&
+ amap->n_need_packing >= PACKING_HARDLIMIT) {
/* Compulsory SLC flushing:
* If there was no chance to do best-fit packing
* and the # of AU-aligned allocation exceeds HARD threshold,
fsi->prev_eio |= SDFAT_EIO_BDI;
sdfat_log_msg(sb, KERN_ERR, "%s: block device is "
"eliminated.(bdi:%p)", __func__, sb->s_bdi);
- sdfat_debug_warn_on(1);
}
return -ENXIO;
}
return 0;
}
-
-/* Make a readahead request */
-s32 bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs)
+#if IS_BUILTIN(CONFIG_SDFAT_FS)
+static void __bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs)
{
- FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
u32 sects_per_page = (PAGE_SIZE >> sb->s_blocksize_bits);
struct blk_plug plug;
u64 i;
- if (!fsi->bd_opened)
- return -EIO;
-
blk_start_plug(&plug);
for (i = 0; i < num_secs; i++) {
if (i && !(i & (sects_per_page - 1)))
sb_breadahead(sb, (sector_t)(secno + i));
}
blk_finish_plug(&plug);
+}
+#else
+static void __bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs)
+{
+ u64 i;
+
+ for (i = 0; i < num_secs; i++)
+ sb_breadahead(sb, (sector_t)(secno + i));
+}
+#endif
+
+/* Make a readahead request */
+s32 bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ if (!fsi->bd_opened)
+ return -EIO;
+
+ __bdev_readahead(sb, secno, num_secs);
return 0;
}
/* skip updating volume dirty flag,
* if this volume has been mounted with read-only
*/
- if (sb->s_flags & MS_RDONLY)
+ if (sb_rdonly(sb))
return 0;
if (!fsi->pbr_bh) {
bpb->bsx.state = new_flag & VOL_DIRTY ? FAT_VOL_DIRTY : 0x00;
} else { /* FAT16/12 */
pbr16_t *bpb = (pbr16_t *) fsi->pbr_bh->b_data;
- bpb->bpb.state = new_flag & VOL_DIRTY ? FAT_VOL_DIRTY : 0x00;
+ bpb->bpb.f16.state = new_flag & VOL_DIRTY ?
+ FAT_VOL_DIRTY : 0x00;
}
if (always_sync)
static bool is_fat32(pbr_t *pbr)
{
- if (le16_to_cpu(pbr->bpb.f16.num_fat_sectors))
+ if (le16_to_cpu(pbr->bpb.fat.num_fat_sectors))
return false;
return true;
}
if (is_exfat(p_pbr))
logical_sect = 1 << p_pbr->bsx.f64.sect_size_bits;
else
- logical_sect = get_unaligned_le16(&p_pbr->bpb.f16.sect_size);
+ logical_sect = get_unaligned_le16(&p_pbr->bpb.fat.sect_size);
/* is x a power of 2?
* (x) != 0 && (((x) & ((x) - 1)) == 0)
opts->improved_allocation = 0;
opts->defrag = 0;
ret = mount_exfat(sb, p_pbr);
- } else if (is_fat32(p_pbr)) {
- if (opts->fs_type && opts->fs_type != FS_TYPE_VFAT) {
- sdfat_log_msg(sb, KERN_ERR,
- "not specified filesystem type "
- "(media:vfat, opts:%s)",
- FS_TYPE_STR[opts->fs_type]);
- ret = -EINVAL;
- goto free_bh;
- }
- /* set maximum file size for FAT */
- sb->s_maxbytes = 0xffffffff;
- ret = mount_fat32(sb, p_pbr);
} else {
if (opts->fs_type && opts->fs_type != FS_TYPE_VFAT) {
sdfat_log_msg(sb, KERN_ERR,
}
/* set maximum file size for FAT */
sb->s_maxbytes = 0xffffffff;
- opts->improved_allocation = 0;
- opts->defrag = 0;
- ret = mount_fat16(sb, p_pbr);
+
+ if (is_fat32(p_pbr)) {
+ ret = mount_fat32(sb, p_pbr);
+ } else {
+ opts->improved_allocation = 0;
+ opts->defrag = 0;
+ ret = mount_fat16(sb, p_pbr);
+ }
}
free_bh:
brelse(tmp_bh);
/* warn misaligned data: data start sector must be a multiple of clu_size */
sdfat_log_msg(sb, KERN_INFO,
"detected volume info : %s "
- "(bps : %lu, spc : %u, data start : %llu, %s)",
+ "(%04hX-%04hX, bps : %lu, spc : %u, data start : %llu, %s)",
sdfat_get_vol_type_str(fsi->vol_type),
+ (fsi->vol_id >> 16) & 0xffff, fsi->vol_id & 0xffff,
sb->s_blocksize, fsi->sect_per_clus, fsi->data_start_sector,
(fsi->data_start_sector & (fsi->sect_per_clus - 1)) ?
"misaligned" : "aligned");
ep2 = ep;
}
- fsi->fs_func->set_entry_time(ep, tm_now(SDFAT_SB(sb), &tm), TM_MODIFY);
+ fsi->fs_func->set_entry_time(ep, tm_now(inode, &tm), TM_MODIFY);
fsi->fs_func->set_entry_attr(ep, fid->attr);
if (modified) {
ep2 = ep;
}
- fsi->fs_func->set_entry_time(ep, tm_now(SDFAT_SB(sb), &tm), TM_MODIFY);
+ fsi->fs_func->set_entry_time(ep, tm_now(inode, &tm), TM_MODIFY);
fsi->fs_func->set_entry_attr(ep, fid->attr);
/*
void *__buf; // __buf should be the last member
} ENTRY_SET_CACHE_T;
-
+/*----------------------------------------------------------------------*/
+/* Inline Functions */
+/*----------------------------------------------------------------------*/
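+/* Valid data cluster indices lie in [CLUS_BASE, num_clusters) */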
+static inline bool is_valid_clus(FS_INFO_T *fsi, u32 clus)
+{
+ if (clus < CLUS_BASE || fsi->num_clusters <= clus)
+ return false;
+ return true;
+}
/*----------------------------------------------------------------------*/
/* External Function Declarations */
ep->size = cpu_to_le64(size);
} /* end of exfat_set_entry_size */
+
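+/*
+ * The DOS-style time field keeps seconds at 2-second granularity; exFAT
+ * stores the remainder in the *_time_ms fields in 10ms units, so an odd
+ * second is encoded as 100 ten-millisecond ticks.
+ */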
+#define TENS_MS_PER_SEC (100)
+#define SEC_TO_TENS_MS(sec) (((sec) & 0x01) ? TENS_MS_PER_SEC : 0)
+#define TENS_MS_TO_SEC(tens_ms) (((tens_ms) / TENS_MS_PER_SEC) ? 1 : 0)
+
static void exfat_get_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode)
{
- u16 t = 0x00, d = 0x21, tz = 0x00;
+ u16 t = 0x00, d = 0x21, tz = 0x00, s = 0x00;
FILE_DENTRY_T *ep = (FILE_DENTRY_T *)p_entry;
switch (mode) {
case TM_CREATE:
t = le16_to_cpu(ep->create_time);
d = le16_to_cpu(ep->create_date);
+ s = TENS_MS_TO_SEC(ep->create_time_ms);
tz = ep->create_tz;
break;
case TM_MODIFY:
t = le16_to_cpu(ep->modify_time);
d = le16_to_cpu(ep->modify_date);
+ s = TENS_MS_TO_SEC(ep->modify_time_ms);
tz = ep->modify_tz;
break;
case TM_ACCESS:
}
tp->tz.value = tz;
- tp->sec = (t & 0x001F) << 1;
+ tp->sec = ((t & 0x001F) << 1) + s;
tp->min = (t >> 5) & 0x003F;
tp->hour = (t >> 11);
tp->day = (d & 0x001F);
switch (mode) {
case TM_CREATE:
ep->create_time = cpu_to_le16(t);
+ ep->create_time_ms = SEC_TO_TENS_MS(tp->sec);
ep->create_date = cpu_to_le16(d);
ep->create_tz = tp->tz.value;
break;
case TM_MODIFY:
ep->modify_time = cpu_to_le16(t);
ep->modify_date = cpu_to_le16(d);
+ ep->modify_time_ms = SEC_TO_TENS_MS(tp->sec);
ep->modify_tz = tp->tz.value;
break;
case TM_ACCESS:
exfat_set_entry_type((DENTRY_T *) ep, type);
- tp = tm_now(SDFAT_SB(sb), &tm);
+ tp = tm_now_sb(sb, &tm);
exfat_set_entry_time((DENTRY_T *) ep, tp, TM_CREATE);
exfat_set_entry_time((DENTRY_T *) ep, tp, TM_MODIFY);
exfat_set_entry_time((DENTRY_T *) ep, tp, TM_ACCESS);
- ep->create_time_ms = 0;
- ep->modify_time_ms = 0;
} /* end of __init_file_entry */
static void __init_strm_entry(STRM_DENTRY_T *ep, u8 flags, u32 start_clu, u64 size)
}
/* check cluster validation */
- if ((p_chain->dir < 2) && (p_chain->dir >= fsi->num_clusters)) {
+ if (!is_valid_clus(fsi, p_chain->dir)) {
EMSG("%s: invalid start cluster (%u)\n", __func__, p_chain->dir);
sdfat_debug_bug_on(1);
return -EIO;
}
/* check cluster validation */
- if ((hint_clu < CLUS_BASE) && (hint_clu >= fsi->num_clusters)) {
- EMSG("%s: hint_cluster is invalid (%u)\n", __func__, hint_clu);
- ASSERT(0);
+ if (!is_valid_clus(fsi, hint_clu)) {
+ /* "last + 1" can be passed as hint_clu. Otherwise, bug_on */
+ if (hint_clu != fsi->num_clusters) {
+ EMSG("%s: hint_cluster is invalid (%u)\n",
+ __func__, hint_clu);
+ sdfat_debug_bug_on(1);
+ }
hint_clu = CLUS_BASE;
if (p_chain->flags == 0x03) {
if (exfat_chain_cont_cluster(sb, p_chain->dir, num_clusters))
pbr64_t *p_bpb = (pbr64_t *)p_pbr;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ fsi->sect_per_clus = 1 << p_bpb->bsx.sect_per_clus_bits;
+ fsi->sect_per_clus_bits = p_bpb->bsx.sect_per_clus_bits;
+ fsi->cluster_size_bits = fsi->sect_per_clus_bits + sb->s_blocksize_bits;
+ fsi->cluster_size = 1 << fsi->cluster_size_bits;
+
if (!p_bpb->bsx.num_fats) {
sdfat_msg(sb, KERN_ERR, "bogus number of FAT structure");
return -EINVAL;
}
- fsi->sect_per_clus = 1 << p_bpb->bsx.sect_per_clus_bits;
- fsi->sect_per_clus_bits = p_bpb->bsx.sect_per_clus_bits;
- fsi->cluster_size_bits = fsi->sect_per_clus_bits + sb->s_blocksize_bits;
- fsi->cluster_size = 1 << fsi->cluster_size_bits;
+ if (p_bpb->bsx.num_fats >= 2) {
+ sdfat_msg(sb, KERN_WARNING,
+ "unsupported number of FAT structure :%u, try with 1",
+ p_bpb->bsx.num_fats);
+ }
fsi->num_FAT_sectors = le32_to_cpu(p_bpb->bsx.fat_length);
+ if (!fsi->num_FAT_sectors) {
+ sdfat_msg(sb, KERN_ERR, "bogus fat size");
+ return -EINVAL;
+ }
fsi->FAT1_start_sector = le32_to_cpu(p_bpb->bsx.fat_offset);
- if (p_bpb->bsx.num_fats == 1)
- fsi->FAT2_start_sector = fsi->FAT1_start_sector;
- else
- fsi->FAT2_start_sector = fsi->FAT1_start_sector + fsi->num_FAT_sectors;
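+ /* Only the first FAT is used; a second FAT, if present, is ignored */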
+ fsi->FAT2_start_sector = fsi->FAT1_start_sector;
fsi->root_start_sector = le32_to_cpu(p_bpb->bsx.clu_offset);
fsi->data_start_sector = fsi->root_start_sector;
fsi->num_sectors = le64_to_cpu(p_bpb->bsx.vol_length);
- fsi->num_clusters = le32_to_cpu(p_bpb->bsx.clu_count) + 2;
+ if (!fsi->num_sectors) {
+ sdfat_msg(sb, KERN_ERR, "bogus number of total sector count");
+ return -EINVAL;
+ }
+
/* because the cluster index starts with 2 */
+ fsi->num_clusters = le32_to_cpu(p_bpb->bsx.clu_count) + CLUS_BASE;
- fsi->vol_type = EXFAT;
fsi->vol_id = le32_to_cpu(p_bpb->bsx.vol_serial);
-
fsi->root_dir = le32_to_cpu(p_bpb->bsx.root_cluster);
fsi->dentries_in_root = 0;
fsi->dentries_per_clu = 1 << (fsi->cluster_size_bits - DENTRY_SIZE_BITS);
-
fsi->vol_flag = (u32) le16_to_cpu(p_bpb->bsx.vol_flags);
fsi->clu_srch_ptr = CLUS_BASE;
fsi->used_clusters = (u32) ~0;
fsi->fs_func = &exfat_fs_func;
+ fsi->vol_type = EXFAT;
fat_ent_ops_init(sb);
if (p_bpb->bsx.vol_flags & VOL_DIRTY) {
}
/* check cluster validation */
- if ((p_chain->dir < 2) && (p_chain->dir >= fsi->num_clusters)) {
+ if (!is_valid_clus(fsi, p_chain->dir)) {
EMSG("%s: invalid start cluster (%u)\n", __func__, p_chain->dir);
sdfat_debug_bug_on(1);
return -EIO;
ep->start_clu_hi = cpu_to_le16(CLUSTER_16(start_clu >> 16));
ep->size = 0;
- tp = tm_now(SDFAT_SB(sb), &tm);
+ tp = tm_now_sb(sb, &tm);
fat_set_entry_time((DENTRY_T *) ep, tp, TM_CREATE);
fat_set_entry_time((DENTRY_T *) ep, tp, TM_MODIFY);
ep->access_date = 0;
.get_au_stat = amap_get_au_stat,
};
-s32 mount_fat16(struct super_block *sb, pbr_t *p_pbr)
+static s32 mount_fat_common(struct super_block *sb, FS_INFO_T *fsi,
+ bpb_t *p_bpb, u32 root_sects)
{
- s32 num_root_sectors;
- bpb16_t *p_bpb = &(p_pbr->bpb.f16);
- FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ bool fat32 = (root_sects == 0);
- if (!p_bpb->num_fats) {
- sdfat_msg(sb, KERN_ERR, "bogus number of FAT structure");
+ fsi->sect_per_clus = p_bpb->sect_per_clus;
+ if (!is_power_of_2(fsi->sect_per_clus)) {
+ sdfat_msg(sb, KERN_ERR, "bogus sectors per cluster %u",
+ fsi->sect_per_clus);
return -EINVAL;
}
- num_root_sectors = get_unaligned_le16(p_bpb->num_root_entries) << DENTRY_SIZE_BITS;
- num_root_sectors = ((num_root_sectors-1) >> sb->s_blocksize_bits) + 1;
-
- fsi->sect_per_clus = p_bpb->sect_per_clus;
fsi->sect_per_clus_bits = ilog2(p_bpb->sect_per_clus);
fsi->cluster_size_bits = fsi->sect_per_clus_bits + sb->s_blocksize_bits;
fsi->cluster_size = 1 << fsi->cluster_size_bits;
+ fsi->dentries_per_clu = 1 <<
+ (fsi->cluster_size_bits - DENTRY_SIZE_BITS);
+
+ fsi->vol_flag = VOL_CLEAN;
+ fsi->clu_srch_ptr = CLUS_BASE;
+ fsi->used_clusters = (u32)~0;
+ fsi->fs_func = &fat_fs_func;
fsi->num_FAT_sectors = le16_to_cpu(p_bpb->num_fat_sectors);
+ if (fat32) {
+ u32 fat32_len = le32_to_cpu(p_bpb->f32.num_fat32_sectors);
+
+ if (fat32_len) {
+ fsi->num_FAT_sectors = fat32_len;
+ } else if (fsi->num_FAT_sectors) {
+ /* SPEC violation for compatibility */
+ sdfat_msg(sb, KERN_WARNING,
+ "no fatsz32, try with fatsz16: %u",
+ fsi->num_FAT_sectors);
+ }
+ }
+
+ if (!fsi->num_FAT_sectors) {
+ sdfat_msg(sb, KERN_ERR, "bogus fat size");
+ return -EINVAL;
+ }
+
+ if (!p_bpb->num_fats) {
+ sdfat_msg(sb, KERN_ERR, "bogus number of FAT structure");
+ return -EINVAL;
+ }
+
+ if (p_bpb->num_fats > 2) {
+ sdfat_msg(sb, KERN_WARNING,
+ "unsupported number of FAT structure :%u, try with 2",
+ p_bpb->num_fats);
+ }
fsi->FAT1_start_sector = le16_to_cpu(p_bpb->num_reserved);
if (p_bpb->num_fats == 1)
fsi->FAT2_start_sector = fsi->FAT1_start_sector;
else
- fsi->FAT2_start_sector = fsi->FAT1_start_sector + fsi->num_FAT_sectors;
+ fsi->FAT2_start_sector = fsi->FAT1_start_sector +
+ fsi->num_FAT_sectors;
fsi->root_start_sector = fsi->FAT2_start_sector + fsi->num_FAT_sectors;
- fsi->data_start_sector = fsi->root_start_sector + num_root_sectors;
+ fsi->data_start_sector = fsi->root_start_sector + root_sects;
+ /* SPEC violation for compatibility */
fsi->num_sectors = get_unaligned_le16(p_bpb->num_sectors);
if (!fsi->num_sectors)
fsi->num_sectors = le32_to_cpu(p_bpb->num_huge_sectors);
return -EINVAL;
}
- fsi->num_clusters = (u32)((fsi->num_sectors - fsi->data_start_sector) >> fsi->sect_per_clus_bits) + CLUS_BASE;
/* because the cluster index starts with 2 */
+ fsi->num_clusters = (u32)((fsi->num_sectors - fsi->data_start_sector) >>
+ fsi->sect_per_clus_bits) + CLUS_BASE;
- fsi->vol_type = FAT16;
- if (fsi->num_clusters < FAT12_THRESHOLD)
- fsi->vol_type = FAT12;
+ return 0;
+}
- fsi->vol_id = get_unaligned_le32(p_bpb->vol_serial);
+s32 mount_fat16(struct super_block *sb, pbr_t *p_pbr)
+{
+ u32 num_root_sectors;
+ bpb_t *p_bpb = &(p_pbr->bpb.fat);
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ fsi->vol_id = get_unaligned_le32(p_bpb->f16.vol_serial);
fsi->root_dir = 0;
fsi->dentries_in_root = get_unaligned_le16(p_bpb->num_root_entries);
if (!fsi->dentries_in_root) {
return -EINVAL;
}
- fsi->dentries_per_clu = 1 << (fsi->cluster_size_bits - DENTRY_SIZE_BITS);
+ num_root_sectors = fsi->dentries_in_root << DENTRY_SIZE_BITS;
+ num_root_sectors = ((num_root_sectors - 1) >> sb->s_blocksize_bits) + 1;
- fsi->vol_flag = VOL_CLEAN;
- fsi->clu_srch_ptr = 2;
- fsi->used_clusters = (u32) ~0;
+ if (mount_fat_common(sb, fsi, p_bpb, num_root_sectors))
+ return -EINVAL;
- fsi->fs_func = &fat_fs_func;
+ fsi->vol_type = FAT16;
+ if (fsi->num_clusters < FAT12_THRESHOLD)
+ fsi->vol_type = FAT12;
fat_ent_ops_init(sb);
- if (p_bpb->state & FAT_VOL_DIRTY) {
+ if (p_bpb->f16.state & FAT_VOL_DIRTY) {
fsi->vol_flag |= VOL_DIRTY;
sdfat_log_msg(sb, KERN_WARNING, "Volume was not properly "
"unmounted. Some data may be corrupt. "
pbr32_t *p_bpb = (pbr32_t *)p_pbr;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
- if (!p_bpb->bpb.num_fats) {
- sdfat_msg(sb, KERN_ERR, "bogus number of FAT structure");
- return -EINVAL;
- }
-
- fsi->sect_per_clus = p_bpb->bpb.sect_per_clus;
- fsi->sect_per_clus_bits = ilog2(p_bpb->bpb.sect_per_clus);
- fsi->cluster_size_bits = fsi->sect_per_clus_bits + sb->s_blocksize_bits;
- fsi->cluster_size = 1 << fsi->cluster_size_bits;
-
- fsi->num_FAT_sectors = le32_to_cpu(p_bpb->bpb.num_fat32_sectors);
-
- fsi->FAT1_start_sector = le16_to_cpu(p_bpb->bpb.num_reserved);
- if (p_bpb->bpb.num_fats == 1)
- fsi->FAT2_start_sector = fsi->FAT1_start_sector;
- else
- fsi->FAT2_start_sector = fsi->FAT1_start_sector + fsi->num_FAT_sectors;
-
- fsi->root_start_sector = fsi->FAT2_start_sector + fsi->num_FAT_sectors;
- fsi->data_start_sector = fsi->root_start_sector;
-
- /* SPEC violation for compatibility */
- fsi->num_sectors = get_unaligned_le16(p_bpb->bpb.num_sectors);
- if (!fsi->num_sectors)
- fsi->num_sectors = le32_to_cpu(p_bpb->bpb.num_huge_sectors);
-
- /* 2nd check */
- if (!fsi->num_sectors) {
- sdfat_msg(sb, KERN_ERR, "bogus number of total sector count");
- return -EINVAL;
- }
-
- fsi->num_clusters = (u32)((fsi->num_sectors - fsi->data_start_sector) >> fsi->sect_per_clus_bits) + CLUS_BASE;
- /* because the cluster index starts with 2 */
-
- fsi->vol_type = FAT32;
fsi->vol_id = get_unaligned_le32(p_bpb->bsx.vol_serial);
-
- fsi->root_dir = le32_to_cpu(p_bpb->bpb.root_cluster);
+ fsi->root_dir = le32_to_cpu(p_bpb->bpb.f32.root_cluster);
fsi->dentries_in_root = 0;
- fsi->dentries_per_clu = 1 << (fsi->cluster_size_bits - DENTRY_SIZE_BITS);
- fsi->vol_flag = VOL_CLEAN;
- fsi->clu_srch_ptr = 2;
- fsi->used_clusters = (u32) ~0;
+ if (mount_fat_common(sb, fsi, &p_bpb->bpb, 0))
+ return -EINVAL;
- fsi->fs_func = &fat_fs_func;
+ /* Should be initialized before calling amap_create() */
+ fsi->vol_type = FAT32;
+ fat_ent_ops_init(sb);
/* Delayed / smart allocation related init */
fsi->reserved_clusters = 0;
- /* Should be initialized before calling amap_create() */
- fat_ent_ops_init(sb);
-
/* AU Map Creation */
if (SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART) {
u32 hidden_sectors = le32_to_cpu(p_bpb->bpb.num_hid_sectors);
u32 calc_hid_sect = 0;
int ret;
-
/* calculate hidden sector size */
calc_hid_sect = __calc_hidden_sect(sb);
if (calc_hid_sect != hidden_sectors) {
return false;
}
-static inline bool is_valid_clus(FS_INFO_T *fsi, u32 clus)
-{
- if (clus < CLUS_BASE || fsi->num_clusters <= clus)
- return false;
- return true;
-}
-
s32 fat_ent_get(struct super_block *sb, u32 loc, u32 *content)
{
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
/*************************************************************************
* FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
*************************************************************************/
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
-#define CURRENT_TIME_SEC timespec64_trunc(current_kernel_time64(), NSEC_PER_SEC)
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
-#define CURRENT_TIME_SEC timespec_trunc(current_kernel_time(), NSEC_PER_SEC)
-#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) */
- /* EMPTY */
-#endif
-
-
#ifdef CONFIG_SDFAT_UEVENT
static struct kobject sdfat_uevent_kobj;
char major[16], minor[16];
char *envp[] = { major, minor, NULL };
+ /* Do not trigger uevent if a device has been ejected */
+ if (fsapi_check_bdi_valid(sb))
+ return;
+
snprintf(major, sizeof(major), "MAJOR=%d", MAJOR(bd_dev));
snprintf(minor, sizeof(minor), "MINOR=%d", MINOR(bd_dev));
pr_err("[SDFAT](%s[%d:%d]):ERR: %pV\n",
sb->s_id, MAJOR(bd_dev), MINOR(bd_dev), &vaf);
#ifdef CONFIG_SDFAT_SUPPORT_STLOG
- if (opts->errors == SDFAT_ERRORS_RO && !(sb->s_flags & MS_RDONLY)) {
+ if (opts->errors == SDFAT_ERRORS_RO && !sb_rdonly(sb)) {
ST_LOG("[SDFAT](%s[%d:%d]):ERR: %pV\n",
sb->s_id, MAJOR(bd_dev), MINOR(bd_dev), &vaf);
}
if (opts->errors == SDFAT_ERRORS_PANIC) {
panic("[SDFAT](%s[%d:%d]): fs panic from previous error\n",
sb->s_id, MAJOR(bd_dev), MINOR(bd_dev));
- } else if (opts->errors == SDFAT_ERRORS_RO && !(sb->s_flags & MS_RDONLY)) {
- sb->s_flags |= MS_RDONLY;
+ } else if (opts->errors == SDFAT_ERRORS_RO && !sb_rdonly(sb)) {
+ sb->s_flags |= SB_RDONLY;
sdfat_statistics_set_mnt_ro();
pr_err("[SDFAT](%s[%d:%d]): Filesystem has been set "
"read-only\n", sb->s_id, MAJOR(bd_dev), MINOR(bd_dev));
tp->Year = year;
}
-TIMESTAMP_T *tm_now(struct sdfat_sb_info *sbi, TIMESTAMP_T *tp)
+TIMESTAMP_T *tm_now(struct inode *inode, TIMESTAMP_T *tp)
{
- sdfat_timespec_t ts = CURRENT_TIME_SEC;
+ sdfat_timespec_t ts = current_time(inode);
DATE_TIME_T dt;
- sdfat_time_unix2fat(sbi, &ts, &dt);
+ sdfat_time_unix2fat(SDFAT_SB(inode->i_sb), &ts, &dt);
tp->year = dt.Year;
tp->mon = dt.Month;
#ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
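+/* Alignment is only applied in units of at least one page */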
+#define MIN_ALIGNED_SIZE (PAGE_SIZE)
+#define MIN_ALIGNED_SIZE_MASK (MIN_ALIGNED_SIZE - 1)
+
/*************************************************************************
* INNER FUNCTIONS FOR FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
*************************************************************************/
{
unmap_underlying_metadata(bdev, block);
}
+
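+/*
+ * Local fallback with the same role as the kernel's wbc_to_write_flags():
+ * WB_SYNC_ALL writeback is issued as a synchronous write.
+ */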
+static inline int wbc_to_write_flags(struct writeback_control *wbc)
+{
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ return WRITE_SYNC;
+
+ return 0;
+}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
if (aligned && (max_sectors & (aligned - 1)))
aligned = 0;
+
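+ /* alignment smaller than MIN_ALIGNED_SIZE (one page) is ignored */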
+ if (aligned && aligned < (MIN_ALIGNED_SIZE >> SECTOR_SIZE_BITS))
+ aligned = 0;
out:
return aligned;
}
unsigned int size_to_align;
};
+/*
+ * After completing I/O on a page, call this routine to update the page
+ * flags appropriately
+ */
+static void __page_write_endio(struct page *page, int err)
+{
+ if (err) {
+ struct address_space *mapping;
+
+ SetPageError(page);
+ mapping = page_mapping(page);
+ if (mapping)
+ mapping_set_error(mapping, err);
+ }
+ __dfr_writepage_end_io(page);
+ end_page_writeback(page);
+}
+
/*
* I/O completion handler for multipage BIOs.
*
*/
static void __mpage_write_end_io(struct bio *bio, int err)
{
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct bio_vec *bv;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
+ struct bvec_iter_all iter_all;
ASSERT(bio_data_dir(bio) == WRITE); /* only write */
+ /* Use bio_for_each_segment_all() to support multi-page bvec */
+ bio_for_each_segment_all(bv, bio, iter_all)
+ __page_write_endio(bv->bv_page, err);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)
+ struct bvec_iter_all iter_all;
+ int i;
+
+ ASSERT(bio_data_dir(bio) == WRITE); /* only write */
+
+ /* Use bio_for_each_segment_all() to support multi-page bvec */
+ bio_for_each_segment_all(bv, bio, i, iter_all)
+ __page_write_endio(bv->bv_page, err);
+#else
+ ASSERT(bio_data_dir(bio) == WRITE); /* only write */
+ bv = bio->bi_io_vec + bio->bi_vcnt - 1;
+
do {
- struct page *page = bvec->bv_page;
-
- if (--bvec >= bio->bi_io_vec)
- prefetchw(&bvec->bv_page->flags);
- if (err) {
- SetPageError(page);
- if (page->mapping)
- mapping_set_error(page->mapping, err);
- }
+ struct page *page = bv->bv_page;
- __dfr_writepage_end_io(page);
+ if (--bv >= bio->bi_io_vec)
+ prefetchw(&bv->bv_page->flags);
- end_page_writeback(page);
- } while (bvec >= bio->bi_io_vec);
+ __page_write_endio(page, err);
+ } while (bv >= bio->bi_io_vec);
+#endif
bio_put(bio);
}
return bio;
}
+
+#if IS_BUILTIN(CONFIG_SDFAT_FS)
+#define __write_boundary_block write_boundary_block
+#define sdfat_buffer_heads_over_limit buffer_heads_over_limit
+#else
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+/*
+ * Called when we've recently written block `bblock', and it is known that
+ * `bblock' was for a buffer_boundary() buffer. This means that the block at
+ * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
+ * dirty, schedule it for IO. So that indirects merge nicely with their data.
+ */
+static void __write_boundary_block(struct block_device *bdev,
+ sector_t bblock, unsigned int blocksize)
+{
+ struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
+
+ if (bh) {
+ if (buffer_dirty(bh))
+ ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
+ put_bh(bh);
+ }
+}
+#else
+#warning "Need an alternative of write_boundary_block function"
+#define __write_boundary_block write_boundary_block
+#endif
+
+#warning "sdfat could not check buffer_heads_over_limit on module. Assumed zero"
+#define sdfat_buffer_heads_over_limit (0)
+#endif
+
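+/*
+ * Mark the first 'first_unmapped' buffers of the page clean before the
+ * bio is submitted (mirrors clean_buffers() in fs/mpage.c).
+ */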
+static void clean_buffers(struct page *page, unsigned int first_unmapped)
+{
+ unsigned int buffer_counter = 0;
+ struct buffer_head *bh, *head;
+
+ if (!page_has_buffers(page))
+ return;
+ head = page_buffers(page);
+ bh = head;
+
+ do {
+ if (buffer_counter++ == first_unmapped)
+ break;
+ clear_buffer_dirty(bh);
+ bh = bh->b_this_page;
+ } while (bh != head);
+
+ /*
+ * we cannot drop the bh if the page is not uptodate or a concurrent
+ * readpage would fail to serialize with the bh and it would read from
+ * disk before we reach the platter.
+ */
+ if (sdfat_buffer_heads_over_limit && PageUptodate(page))
+ try_to_free_buffers(page);
+}
+
static int sdfat_mpage_writepage(struct page *page,
struct writeback_control *wbc, void *data)
{
loff_t i_size = i_size_read(inode);
unsigned long end_index = i_size >> PAGE_SHIFT;
int ret = 0;
+ int op_flags = wbc_to_write_flags(wbc);
if (page_has_buffers(page)) {
struct buffer_head *head = page_buffers(page);
*/
if (bio) {
if (mpd->last_block_in_bio != blocks[0] - 1) {
- bio = mpage_bio_submit_write(0, bio);
+ bio = mpage_bio_submit_write(op_flags, bio);
} else if (mpd->size_to_align) {
unsigned int mask = mpd->size_to_align - 1;
sector_t max_end_block =
(__sdfat_bio_sector(bio) & ~(mask)) + mask;
- if ((__sdfat_bio_size(bio) != (1 << (mask + 1))) &&
+ if ((__sdfat_bio_size(bio) & MIN_ALIGNED_SIZE_MASK) &&
(mpd->last_block_in_bio == max_end_block)) {
+ int op_nomerge = op_flags | REQ_NOMERGE;
+
MMSG("%s(inode:%p) alignment mpage_bio_submit"
- "(start:%u, len:%u aligned:%u)\n",
+ "(start:%u, len:%u size:%u aligned:%u)\n",
__func__, inode,
(unsigned int)__sdfat_bio_sector(bio),
(unsigned int)(mpd->last_block_in_bio -
__sdfat_bio_sector(bio) + 1),
+ (unsigned int)__sdfat_bio_size(bio),
(unsigned int)mpd->size_to_align);
- bio = mpage_bio_submit_write(REQ_NOMERGE, bio);
+ bio = mpage_bio_submit_write(op_nomerge, bio);
}
}
}
*/
length = first_unmapped << blkbits;
if (bio_add_page(bio, page, length, 0) < length) {
- bio = mpage_bio_submit_write(0, bio);
+ bio = mpage_bio_submit_write(op_flags, bio);
goto alloc_new;
}
* OK, we have our BIO, so we can now mark the buffers clean. Make
* sure to only clean buffers which we know we'll be writing.
*/
- if (page_has_buffers(page)) {
- struct buffer_head *head = page_buffers(page);
- struct buffer_head *bh = head;
- unsigned int buffer_counter = 0;
-
- do {
- if (buffer_counter++ == first_unmapped)
- break;
- clear_buffer_dirty(bh);
- bh = bh->b_this_page;
- } while (bh != head);
-
- /*
- * we cannot drop the bh if the page is not uptodate
- * or a concurrent readpage would fail to serialize with the bh
- * and it would read from disk before we reach the platter.
- */
- if (buffer_heads_over_limit && PageUptodate(page))
- try_to_free_buffers(page);
- }
+ clean_buffers(page, first_unmapped);
BUG_ON(PageWriteback(page));
set_page_writeback(page);
unlock_page(page);
if (boundary || (first_unmapped != blocks_per_page)) {
- bio = mpage_bio_submit_write(0, bio);
+ bio = mpage_bio_submit_write(op_flags, bio);
if (boundary_block) {
- write_boundary_block(boundary_bdev,
+ __write_boundary_block(boundary_bdev,
boundary_block, 1 << blkbits);
}
} else {
confused:
if (bio)
- bio = mpage_bio_submit_write(0, bio);
+ bio = mpage_bio_submit_write(op_flags, bio);
if (mpd->use_writepage) {
ret = mapping->a_ops->writepage(page, wbc);
BUG_ON(!get_block);
blk_start_plug(&plug);
ret = write_cache_pages(mapping, wbc, sdfat_mpage_writepage, &mpd);
- if (mpd.bio)
- mpage_bio_submit_write(0, mpd.bio);
+ if (mpd.bio) {
+ int op_flags = wbc_to_write_flags(wbc);
+
+ mpage_bio_submit_write(op_flags, mpd.bio);
+ }
blk_finish_plug(&plug);
return ret;
}
{
unmap_underlying_metadata(bdev, block);
}
+
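+/*
+ * Local fallback with the same role as the kernel's wbc_to_write_flags():
+ * WB_SYNC_ALL writeback is issued as a synchronous write.
+ */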
+static inline int wbc_to_write_flags(struct writeback_control *wbc)
+{
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ return WRITE_SYNC;
+
+ return 0;
+}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
-static inline void __sdfat_submit_bio_write(struct bio *bio)
+static inline void __sdfat_submit_bio_write(struct bio *bio,
+ struct writeback_control *wbc)
{
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ int write_flags = wbc_to_write_flags(wbc);
+
+ bio_set_op_attrs(bio, REQ_OP_WRITE, write_flags);
submit_bio(bio);
}
return init_name_hash(dentry);
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) */
-static inline void __sdfat_submit_bio_write(struct bio *bio)
+static inline void __sdfat_submit_bio_write(struct bio *bio,
+ struct writeback_control *wbc)
{
- submit_bio(WRITE, bio);
+ int write_flags = wbc_to_write_flags(wbc);
+
+ submit_bio(write_flags, bio);
}
static inline unsigned int __sdfat_full_name_hash(const struct dentry *unused, const char *name, unsigned int len)
}
#endif
-
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
static inline sector_t __sdfat_bio_sector(struct bio *bio)
{
/*************************************************************************
* MORE FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
*************************************************************************/
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
-#define CURRENT_TIME_SEC timespec64_trunc(current_kernel_time64(), NSEC_PER_SEC)
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
-#define CURRENT_TIME_SEC timespec_trunc(current_kernel_time(), NSEC_PER_SEC)
-#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) */
- /* EMPTY */
-#endif
-
-
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
static void sdfat_writepage_end_io(struct bio *bio)
{
__lock_super(sb);
/* Check if FS_ERROR occurred */
- if (sb->s_flags & MS_RDONLY) {
+ if (sb_rdonly(sb)) {
dfr_err("RDONLY partition (err %d)", -EPERM);
__unlock_super(sb);
return -EPERM;
TMSG("%s entered\n", __func__);
- ts = CURRENT_TIME_SEC;
+ ts = current_time(dir);
err = fsapi_create(dir, (u8 *) dentry->d_name.name, FM_REGULAR, &fid);
if (err)
TMSG("%s entered\n", __func__);
- ts = CURRENT_TIME_SEC;
+ ts = current_time(dir);
SDFAT_I(inode)->fid.size = i_size_read(inode);
TMSG("%s entered\n", __func__);
- ts = CURRENT_TIME_SEC;
+ ts = current_time(dir);
err = fsapi_create(dir, (u8 *) dentry->d_name.name, FM_SYMLINK, &fid);
if (err)
TMSG("%s entered\n", __func__);
- ts = CURRENT_TIME_SEC;
+ ts = current_time(dir);
err = fsapi_mkdir(dir, (u8 *) dentry->d_name.name, &fid);
if (err)
TMSG("%s entered\n", __func__);
- ts = CURRENT_TIME_SEC;
+ ts = current_time(dir);
SDFAT_I(inode)->fid.size = i_size_read(inode);
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
- ts = CURRENT_TIME_SEC;
+ ts = current_time(old_inode);
SDFAT_I(old_inode)->fid.size = i_size_read(old_inode);
if (err)
return err;
- inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+ inode->i_ctime = inode->i_mtime = current_time(inode);
mark_inode_dirty(inode);
if (!IS_SYNC(inode))
if (err)
goto out;
- inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+ inode->i_ctime = inode->i_mtime = current_time(inode);
if (IS_DIRSYNC(inode))
(void) sdfat_sync_inode(inode);
else
}
static inline void sdfat_submit_fullpage_bio(struct block_device *bdev,
- sector_t sector, unsigned int length, struct page *page)
+ sector_t sector, unsigned int length,
+ struct page *page, struct writeback_control *wbc)
{
/* Single page bio submit */
struct bio *bio;
__sdfat_set_bio_iterate(bio, sector, length, 0, 0);
bio->bi_end_io = sdfat_writepage_end_io;
- __sdfat_submit_bio_write(bio);
+ __sdfat_submit_bio_write(bio, wbc);
}
static int sdfat_writepage(struct page *page, struct writeback_control *wbc)
sdfat_submit_fullpage_bio(head->b_bdev,
head->b_blocknr << (sb->s_blocksize_bits - SECTOR_SIZE_BITS),
nr_blocks_towrite << inode->i_blkbits,
- page);
+ page, wbc);
unlock_page(page);
if (fsapi_check_bdi_valid(sb))
return -EIO;
- if (sb->s_flags & MS_RDONLY)
+ if (sb_rdonly(sb))
return -EROFS;
return 0;
sdfat_write_failed(mapping, pos+len);
if (!(err < 0) && !(fid->attr & ATTR_ARCHIVE)) {
- inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_mtime = inode->i_ctime = current_time(inode);
fid->attr |= ATTR_ARCHIVE;
mark_inode_dirty(inode);
}
return &ei->vfs_inode;
}
-static void sdfat_destroy_inode(struct inode *inode)
+static void sdfat_free_inode(struct inode *inode)
{
if (SDFAT_I(inode)->target) {
kfree(SDFAT_I(inode)->target);
kmem_cache_free(sdfat_inode_cachep, SDFAT_I(inode));
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+/* Use free_inode instead of destroy_inode */
+#define sdfat_destroy_inode (NULL)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
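+/* Defer the actual free by an RCU grace period so that lockless
+ * path walkers never dereference an already-freed inode.
+ */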
+static void sdfat_i_callback(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+
+ sdfat_free_inode(inode);
+}
+
+static void sdfat_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, sdfat_i_callback);
+}
+#else
+static void sdfat_destroy_inode(struct inode *inode)
+{
+ sdfat_free_inode(inode);
+}
+#endif
+
static int __sdfat_write_inode(struct inode *inode, int sync)
{
struct super_block *sb = inode->i_sb;
static void sdfat_evict_inode(struct inode *inode)
{
- truncate_inode_pages(&inode->i_data, 0);
+ truncate_inode_pages_final(&inode->i_data);
if (!inode->i_nlink) {
loff_t old_size = i_size_read(inode);
/* remove_inode_hash(inode); */
}
-
-
-static void sdfat_put_super(struct super_block *sb)
+static void sdfat_free_sb_info(struct sdfat_sb_info *sbi)
{
- struct sdfat_sb_info *sbi = SDFAT_SB(sb);
- int err;
-
- sdfat_log_msg(sb, KERN_INFO, "trying to unmount...");
-
- __cancel_delayed_work_sync(sbi);
-
- if (__is_sb_dirty(sb))
- sdfat_write_super(sb);
-
- __free_dfr_mem_if_required(sb);
- err = fsapi_umount(sb);
-
if (sbi->nls_disk) {
unload_nls(sbi->nls_disk);
sbi->nls_disk = NULL;
sbi->options.iocharset = sdfat_default_iocharset;
}
+ if (sbi->use_vmalloc) {
+ vfree(sbi);
+ return;
+ }
+ kfree(sbi);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
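+/* sb_info may still be reachable by RCU readers while the superblock
+ * is being torn down, so its release is deferred by a grace period too.
+ */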
+static void delayed_free(struct rcu_head *p)
+{
+ struct sdfat_sb_info *sbi = container_of(p, struct sdfat_sb_info, rcu);
+
+ sdfat_free_sb_info(sbi);
+}
+
+static void __sdfat_destroy_sb_info(struct super_block *sb)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ call_rcu(&sbi->rcu, delayed_free);
+}
+#else
+static void __sdfat_destroy_sb_info(struct super_block *sb)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ sdfat_free_sb_info(sbi);
sb->s_fs_info = NULL;
+}
+#endif
+
+static void sdfat_destroy_sb_info(struct super_block *sb)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
kobject_del(&sbi->sb_kobj);
kobject_put(&sbi->sb_kobj);
- if (!sbi->use_vmalloc)
- kfree(sbi);
- else
- vfree(sbi);
+
+ __sdfat_destroy_sb_info(sb);
+}
+
+static void sdfat_put_super(struct super_block *sb)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ int err;
+
+ sdfat_log_msg(sb, KERN_INFO, "trying to unmount...");
+
+ __cancel_delayed_work_sync(sbi);
+
+ if (__is_sb_dirty(sb))
+ sdfat_write_super(sb);
+
+ __free_dfr_mem_if_required(sb);
+ err = fsapi_umount(sb);
+
+ sdfat_destroy_sb_info(sb);
sdfat_log_msg(sb, KERN_INFO, "unmounted successfully! %s",
err ? "(with previous I/O errors)" : "");
/* flush delayed FAT/DIR dirty */
__flush_delayed_meta(sb, 0);
- if (!(sb->s_flags & MS_RDONLY))
+ if (!sb_rdonly(sb))
fsapi_sync_fs(sb, 0);
__unlock_super(sb);
buf->f_bavail = info.FreeClusters;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
- buf->f_namelen = 260;
+ /* 255 Unicode characters in UTF-8 encoding */
+ buf->f_namelen = MAX_NAME_LENGTH * MAX_CHARSET_SIZE;
return 0;
}
struct sdfat_sb_info *sbi = SDFAT_SB(sb);
FS_INFO_T *fsi = &(sbi->fsi);
- *flags |= MS_NODIRATIME;
+ *flags |= SB_NODIRATIME;
prev_sb_flags = sb->s_flags;
fsapi_set_vol_flags(sb, VOL_CLEAN, 1);
sdfat_log_msg(sb, KERN_INFO, "re-mounted(%s->%s), eio=0x%x, Opts: %s",
- (prev_sb_flags & MS_RDONLY) ? "ro" : "rw",
- (*flags & MS_RDONLY) ? "ro" : "rw",
+ (prev_sb_flags & SB_RDONLY) ? "ro" : "rw",
+ (*flags & SB_RDONLY) ? "ro" : "rw",
fsi->prev_eio, orig_data);
kfree(orig_data);
return 0;
static const struct super_operations sdfat_sops = {
.alloc_inode = sdfat_alloc_inode,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+ .free_inode = sdfat_free_inode,
+#else
.destroy_inode = sdfat_destroy_inode,
+#endif
.write_inode = sdfat_write_inode,
.evict_inode = sdfat_evict_inode,
.put_super = sdfat_put_super,
FS_INFO_T *fsi = &(sbi->fsi);
DIR_ENTRY_T info;
- ts = CURRENT_TIME_SEC;
+ ts = current_time(inode);
SDFAT_I(inode)->fid.dir.dir = fsi->root_dir;
SDFAT_I(inode)->fid.dir.flags = 0x01;
mutex_init(&sbi->s_vlock);
sb->s_fs_info = sbi;
- sb->s_flags |= MS_NODIRATIME;
+ sb->s_flags |= SB_NODIRATIME;
sb->s_magic = SDFAT_SUPER_MAGIC;
sb->s_op = &sdfat_sops;
ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+ sb->s_time_gran = NSEC_PER_SEC; /* the same with default */
+ sb->s_time_min = SDFAT_MIN_TIMESTAMP_SECS;
+ sb->s_time_max = SDFAT_MAX_TIMESTAMP_SECS;
+#endif
+
err = parse_options(sb, data, silent, &debug, &sbi->options);
if (err) {
sdfat_log_msg(sb, KERN_ERR, "failed to parse options");
static void sdfat_destroy_inodecache(void)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy cache.
+ */
+ rcu_barrier();
+#endif
kmem_cache_destroy(sdfat_inode_cachep);
}
static void __exit exit_sdfat_fs(void)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy cache.
+ */
+ rcu_barrier();
+#endif
sdfat_uevent_uninit();
sdfat_statistics_uninit();
struct mutex s_vlock; /* volume lock */
int use_vmalloc;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ struct rcu_head rcu;
+#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
int s_dirt;
struct mutex s_lock; /* superblock lock */
typedef struct timespec sdfat_timespec_t;
#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ /* EMPTY */
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) */
+/*
+ * sb->s_flags. Note that these mirror the equivalent MS_* flags where
+ * represented in both.
+ */
+#define SB_RDONLY 1 /* Mount read-only */
+#define SB_NODIRATIME 2048 /* Do not update directory access times */
+static inline bool sb_rdonly(const struct super_block *sb)
+{
+ return sb->s_flags & MS_RDONLY;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ /* EMPTY */
+#else
+static inline sdfat_timespec_t current_time(struct inode *inode)
+{
+ return CURRENT_TIME_SEC;
+}
+#endif
/*
* FIXME : needs on-disk-slot in-memory data
*/
DATE_TIME_T *tp);
extern void sdfat_time_unix2fat(struct sdfat_sb_info *sbi, sdfat_timespec_t *ts,
DATE_TIME_T *tp);
-extern TIMESTAMP_T *tm_now(struct sdfat_sb_info *sbi, TIMESTAMP_T *tm);
+extern TIMESTAMP_T *tm_now(struct inode *inode, TIMESTAMP_T *tm);
+static inline TIMESTAMP_T *tm_now_sb(struct super_block *sb, TIMESTAMP_T *tm)
+{
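+ /* dummy inode: only i_sb is set, which is all tm_now() needs */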
+ struct inode fake_inode;
+
+ fake_inode.i_sb = sb;
+ return tm_now(&fake_inode, tm);
+}
#ifdef CONFIG_SDFAT_DEBUG
#define CS_PBR_SECTOR 1
#define CS_DEFAULT 2
+/* time min/max */
+/* Jan 1 GMT 00:00:00 1980 */
+#define SDFAT_MIN_TIMESTAMP_SECS 315532800LL
+/* Dec 31 GMT 23:59:59 2107 */
+#define SDFAT_MAX_TIMESTAMP_SECS 4354819199LL
+
+
/*
* ioctl command
*/
/* On-Disk Type Definitions */
/*----------------------------------------------------------------------*/
-/* FAT12/16 BIOS parameter block (64 bytes) */
+/* FAT12/16/32 BIOS parameter block (64 bytes) */
typedef struct {
__u8 jmp_boot[3];
__u8 oem_name[8];
__le32 num_hid_sectors; /* . */
__le32 num_huge_sectors;
- __u8 phy_drv_no;
- __u8 state; /* used by WindowsNT for mount state */
- __u8 ext_signature;
- __u8 vol_serial[4];
- __u8 vol_label[11];
- __u8 vol_type[8];
- __le16 dummy;
-} bpb16_t;
-
-/* FAT32 BIOS parameter block (64 bytes) */
-typedef struct {
- __u8 jmp_boot[3];
- __u8 oem_name[8];
-
- __u8 sect_size[2]; /* unaligned */
- __u8 sect_per_clus;
- __le16 num_reserved;
- __u8 num_fats;
- __u8 num_root_entries[2]; /* unaligned */
- __u8 num_sectors[2]; /* unaligned */
- __u8 media_type;
- __le16 num_fat_sectors; /* zero */
- __le16 sectors_in_track;
- __le16 num_heads;
- __le32 num_hid_sectors; /* . */
- __le32 num_huge_sectors;
-
- __le32 num_fat32_sectors;
- __le16 ext_flags;
- __u8 fs_version[2];
- __le32 root_cluster; /* . */
- __le16 fsinfo_sector;
- __le16 backup_sector;
- __le16 reserved[6]; /* . */
-} bpb32_t;
+ union {
+ struct {
+ __u8 phy_drv_no;
+ __u8 state; /* used by WinNT for mount state */
+ __u8 ext_signature;
+ __u8 vol_serial[4];
+ __u8 vol_label[11];
+ __u8 vol_type[8];
+ __le16 nouse;
+ } f16;
+
+ struct {
+ __le32 num_fat32_sectors;
+ __le16 ext_flags;
+ __u8 fs_version[2];
+ __le32 root_cluster; /* . */
+ __le16 fsinfo_sector;
+ __le16 backup_sector;
+ __le16 reserved[6]; /* . */
+ } f32;
+ };
+} bpb_t;
/* FAT32 EXTEND BIOS parameter block (32 bytes) */
typedef struct {
/* FAT12/16 PBR (64 bytes) */
typedef struct {
- bpb16_t bpb;
+ bpb_t bpb;
} pbr16_t;
/* FAT32 PBR[BPB+BSX] (96 bytes) */
typedef struct {
- bpb32_t bpb;
+ bpb_t bpb;
bsx32_t bsx;
} pbr32_t;
typedef struct {
union {
__u8 raw[64];
- bpb16_t f16;
- bpb32_t f32;
+ bpb_t fat;
bpb64_t f64;
} bpb;
union {
/* PURPOSE : sdFAT File Manager */
/* */
/************************************************************************/
-#define SDFAT_VERSION "2.3.0-lineage"
+#define SDFAT_VERSION "2.4.5-lineage"
* FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
*************************************************************************/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
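+/* Android common kernels (5.4+) add a flags argument to the ->get handler */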
+#if defined(CONFIG_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
+static int sdfat_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size,
+ int flags)
+{
+ return __sdfat_getxattr(name, buffer, size);
+}
+#else
static int sdfat_xattr_get(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, void *buffer, size_t size)
{
return __sdfat_getxattr(name, buffer, size);
}
+#endif
static int sdfat_xattr_set(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,