From Samsung package version T725XXU2DUD1.
Change-Id: I4e8c3317e03edc44d6ac4248231067da8c4f3873
menu "DOS/FAT/NT Filesystems"
source "fs/fat/Kconfig"
+source "fs/sdfat/Kconfig"
source "fs/exfat/Kconfig"
source "fs/ntfs/Kconfig"
obj-$(CONFIG_CODA_FS) += coda/
obj-$(CONFIG_MINIX_FS) += minix/
obj-$(CONFIG_FAT_FS) += fat/
+obj-$(CONFIG_SDFAT_FS) += sdfat/
obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_BFS_FS) += bfs/
obj-$(CONFIG_ISO9660_FS) += isofs/
--- /dev/null
+config SDFAT_FS
+ tristate
+ default y
+ select NLS
+ select NLS_UTF8
+ select NLS_CODEPAGE_437
+ select NLS_ISO8859_1
+ help
+ If you want to use the sdFAT file system, then you must say Y or M
+ here to include sdFAT support.
+ sdFAT is a unified FAT-based file system which supports not only
+ fat12/16/32 with vfat but also exfat. sdFAT supports the winnt
+ short-name rule. (winnt: emulate the Windows NT rule for
+ display/create.)
+
+ To compile this as a module, choose M here: the module will be called
+ sdfat_core and sdfat_fs.
+
+config SDFAT_DELAYED_META_DIRTY
+ bool "Enable delayed metadata dirty"
+ default y
+ depends on SDFAT_FS
+ help
+ If you enable this feature, metadata(FAT/Directory entry) is updated
+ by flush thread.
+
+config SDFAT_SUPPORT_DIR_SYNC
+ bool "Enable supporting dir sync"
+ default n
+ depends on SDFAT_FS
+ help
+ If you enable this feature, the modification for directory operation
+ is written to a storage at once.
+
+config SDFAT_DEFAULT_CODEPAGE
+ int "Default codepage for sdFAT"
+ default 437
+ depends on SDFAT_FS
+ help
+ This option should be set to the codepage of your sdFAT filesystems.
+
+config SDFAT_DEFAULT_IOCHARSET
+ string "Default iocharset for sdFAT"
+ default "utf8"
+ depends on SDFAT_FS
+ help
+ Set this to the default input/output character set you'd
+ like sdFAT to use. It should probably match the character set
+ that most of your sdFAT filesystems use, and can be overridden
+ with the "iocharset" mount option for sdFAT filesystems.
+
+config SDFAT_CHECK_RO_ATTR
+ bool "Check read-only attribute"
+ default n
+ depends on SDFAT_FS
+
+config SDFAT_ALIGNED_MPAGE_WRITE
+ bool "Enable supporting aligned mpage_write"
+ default y if SDFAT_FS=y
+ default n if SDFAT_FS=m
+ depends on SDFAT_FS
+
+config SDFAT_VIRTUAL_XATTR
+ bool "Virtual xattr support for sdFAT"
+ default n
+ depends on SDFAT_FS
+ help
+ If you enable this feature, it supports virtual xattr.
+ This feature will be deprecated because it might be the same with
+ "context" mount option.
+
+config SDFAT_VIRTUAL_XATTR_SELINUX_LABEL
+ string "Default string for SELinux label"
+ default "u:object_r:sdcard_external:s0"
+ depends on SDFAT_FS && SDFAT_VIRTUAL_XATTR
+ help
+ Set this to the default string for SELinux label.
+
+config SDFAT_SUPPORT_STLOG
+ bool "Enable storage log"
+ default y
+ depends on SDFAT_FS && PROC_STLOG
+
+config SDFAT_DEBUG
+ bool "enable debug features"
+ depends on SDFAT_FS
+ default y
+
+config SDFAT_DBG_IOCTL
+ bool "enable debug-ioctl features"
+ depends on SDFAT_FS && SDFAT_DEBUG
+ default n
+
+config SDFAT_DBG_MSG
+ bool "enable debug messages"
+ depends on SDFAT_FS && SDFAT_DEBUG
+ default y
+
+config SDFAT_DBG_BUGON
+ bool "enable strict BUG_ON() for debugging"
+ depends on SDFAT_FS && SDFAT_DEBUG
+ default n
+
+config SDFAT_STATISTICS
+ bool "enable statistics for bigdata"
+ depends on SDFAT_FS
+ default y
+
+config SDFAT_UEVENT
+ bool "enable uevent"
+ depends on SDFAT_FS
+ default y
--- /dev/null
+#
+# Makefile for the linux FAT12/16/32(VFAT)/64(exFAT) filesystem driver.
+#
+
+obj-$(CONFIG_SDFAT_FS) += sdfat_fs.o
+
+sdfat_fs-objs := sdfat.o core.o core_fat.o core_exfat.o api.o blkdev.o \
+ fatent.o amap_smart.o cache.o dfr.o nls.o misc.o \
+ mpage.o extent.o
+
+sdfat_fs-$(CONFIG_SDFAT_VIRTUAL_XATTR) += xattr.o
+sdfat_fs-$(CONFIG_SDFAT_STATISTICS) += statistics.o
+
+
+# Out-of-tree build helpers; use $(MAKE) so job control and options
+# propagate correctly through the recursive invocation.
+all:
+ $(MAKE) -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
+
+clean:
+ $(MAKE) -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
+
+# Regenerate the cscope database (remove both the file list and the
+# cscope.out database; the original removed cscope.files twice).
+cscope:
+ rm -rf cscope.files cscope.out
+ find $(PWD) \( -name '*.c' -o -name '*.cpp' -o -name '*.cc' -o -name '*.h' -o -name '*.s' -o -name '*.S' \) -print > cscope.files
+ cscope
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : amap_smart.c */
+/* PURPOSE : FAT32 Smart allocation code for sdFAT */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "sdfat.h"
+#include "core.h"
+#include "amap_smart.h"
+
+/* AU list related functions */
+/* Detach @entry from its doubly-linked (cold AU) list and clear its links.
+ * A NULL prev afterwards marks the entry as detached ("working"/selected);
+ * amap_add_cold_au()/amap_remove_cold_au() rely on that convention.
+ */
+static inline void amap_list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+
+ /* Will be used to check if the entry is a single entry(selected) */
+ entry->prev = NULL;
+ entry->next = NULL;
+}
+
+/* Push @au onto the front of the singly-linked list @shead
+ * (used for the hot and ignored AU lists).
+ * entry->head records list membership and must be NULL (not listed) on
+ * entry. Always returns 0.
+ */
+static inline int amap_insert_to_list(AU_INFO_T *au, struct slist_head *shead)
+{
+ struct slist_head *entry = &au->shead;
+
+ ASSERT(!entry->head);
+
+ entry->next = shead->next;
+ entry->head = shead;
+
+ shead->next = entry;
+
+ return 0;
+}
+
+/* Unlink @au from the singly-linked list @shead via a linear scan.
+ * Returns 0 on success; BUG()s if @au claims membership of a different
+ * list or is not found on @shead.
+ * NOTE(review): the trailing BUG_ON("...") always fires (non-NULL string
+ * pointer), so the missing return after it is unreachable in practice,
+ * but some compilers may warn about falling off a non-void function.
+ */
+static inline int amap_remove_from_list(AU_INFO_T *au, struct slist_head *shead)
+{
+ struct slist_head *entry = &au->shead;
+ struct slist_head *iter;
+
+ BUG_ON(entry->head != shead);
+
+ iter = shead;
+
+ while (iter->next) {
+ if (iter->next == entry) {
+ // iter->next = iter->next->next
+ iter->next = entry->next;
+
+ entry->next = NULL;
+ entry->head = NULL;
+ return 0;
+ }
+ iter = iter->next;
+ }
+
+ BUG_ON("Not reachable");
+}
+
+/* Full-linear search => find the AU with the max. number of free clusters
+ * on singly-linked list @shead. Returns NULL when every listed AU has
+ * zero free clusters (or the list is empty).
+ */
+static inline AU_INFO_T *amap_find_hot_au_largest(struct slist_head *shead)
+{
+ struct slist_head *iter;
+ uint16_t max_fclu = 0;
+ AU_INFO_T *entry, *ret = NULL;
+
+ ASSERT(shead->head == shead); /* Singly-list condition */
+ ASSERT(shead->next != shead);
+
+ iter = shead->next;
+
+ while (iter) {
+ entry = list_entry(iter, AU_INFO_T, shead);
+
+ if (entry->free_clusters > max_fclu) {
+ max_fclu = entry->free_clusters;
+ ret = entry;
+ }
+
+ iter = iter->next;
+ }
+
+ return ret;
+}
+
+/* Find partially used AU with max. number of fclu.
+ * If there is no partial AU available, pick a clean one.
+ * Scans amap->slist_hot; a clean AU (free_clusters == clusters_per_au)
+ * is remembered only as a fallback when no partial AU has been seen.
+ */
+static inline AU_INFO_T *amap_find_hot_au_partial(AMAP_T *amap)
+{
+ struct slist_head *iter;
+ uint16_t max_fclu = 0;
+ AU_INFO_T *entry, *ret = NULL;
+
+ iter = &amap->slist_hot;
+ ASSERT(iter->head == iter); /* Singly-list condition */
+ ASSERT(iter->next != iter);
+
+ iter = iter->next;
+
+ while (iter) {
+ entry = list_entry(iter, AU_INFO_T, shead);
+
+ if (entry->free_clusters > max_fclu) {
+ if (entry->free_clusters < amap->clusters_per_au) {
+ max_fclu = entry->free_clusters;
+ ret = entry;
+ } else {
+ /* clean AU: keep only if nothing better yet */
+ if (!ret)
+ ret = entry;
+ }
+ }
+
+ iter = iter->next;
+ }
+
+ return ret;
+}
+
+
+
+
+/*
+ * Size-base AU management functions
+ */
+
+/*
+ * Add au into cold AU MAP
+ * au: an isolated (not in a list) AU data structure
+ *
+ * The AU is appended to the free-cluster bucket matching its
+ * free_clusters count; full AUs are intentionally left unlisted.
+ * fclu_hint tracks the largest known free count. Always returns 0.
+ */
+int amap_add_cold_au(AMAP_T *amap, AU_INFO_T *au)
+{
+ FCLU_NODE_T *fclu_node = NULL;
+
+ /* Check if a single entry */
+ BUG_ON(au->head.prev);
+
+ /* Ignore if the au is full */
+ if (!au->free_clusters)
+ return 0;
+
+ /* Find entry */
+ fclu_node = NODE(au->free_clusters, amap);
+
+ /* Insert to the list */
+ list_add_tail(&(au->head), &(fclu_node->head));
+
+ /* Update fclu_hint (Increase) */
+ if (au->free_clusters > amap->fclu_hint)
+ amap->fclu_hint = au->free_clusters;
+
+ return 0;
+}
+
+/*
+ * Remove an AU from AU MAP
+ * Detached AUs (prev == NULL) are only legal when full; otherwise the
+ * AU is unlinked from its free-cluster bucket. Always returns 0.
+ */
+int amap_remove_cold_au(AMAP_T *amap, AU_INFO_T *au)
+{
+ struct list_head *prev = au->head.prev;
+
+ /* Single entries are not managed in lists */
+ if (!prev) {
+ BUG_ON(au->free_clusters > 0);
+ return 0;
+ }
+
+ /* remove from list */
+ amap_list_del(&(au->head));
+
+ return 0;
+}
+
+
+/* "Find" best fit AU
+ * returns NULL if there is no AU w/ enough free space.
+ *
+ * This function doesn't change AU status.
+ * The caller should call amap_remove_cold_au() if needed.
+ *
+ * Scans the buckets upward starting from the exact-fit bucket, so the
+ * first non-empty bucket yields the smallest AU that still satisfies
+ * @free_clusters. fclu_hint gives a fast "nothing big enough" answer.
+ */
+AU_INFO_T *amap_find_cold_au_bestfit(AMAP_T *amap, uint16_t free_clusters)
+{
+ AU_INFO_T *au = NULL;
+ FCLU_NODE_T *fclu_iter;
+
+ if (free_clusters <= 0 || free_clusters > amap->clusters_per_au) {
+ EMSG("AMAP: amap_find_cold_au_bestfit / unexpected arg. (%d)\n",
+ free_clusters);
+ return NULL;
+ }
+
+ fclu_iter = NODE(free_clusters, amap);
+
+ if (amap->fclu_hint < free_clusters) {
+ /* There is no AUs with enough free_clusters */
+ return NULL;
+ }
+
+ /* Naive Hash management (++) */
+ do {
+ if (!list_empty(&fclu_iter->head)) {
+ struct list_head *first = fclu_iter->head.next;
+
+ au = list_entry(first, AU_INFO_T, head);
+
+ break;
+ }
+
+ fclu_iter++;
+ } while (fclu_iter < (amap->fclu_nodes + amap->clusters_per_au));
+
+
+ // BUG_ON(au->free_clusters < 0);
+ BUG_ON(au && (au->free_clusters > amap->clusters_per_au));
+
+ return au;
+}
+
+
+/* "Pop" best fit AU
+ *
+ * returns NULL if there is no AU w/ enough free space.
+ * The returned AU will not be in the list anymore.
+ * (find + remove convenience wrapper)
+ */
+AU_INFO_T *amap_pop_cold_au_bestfit(AMAP_T *amap, uint16_t free_clusters)
+{
+ /* Naive implementation */
+ AU_INFO_T *au;
+
+ au = amap_find_cold_au_bestfit(amap, free_clusters);
+ if (au)
+ amap_remove_cold_au(amap, au);
+
+ return au;
+}
+
+
+
+/* Pop the AU with the largest free space
+ *
+ * search from 'start_fclu' to 0
+ * (target freecluster : -1 for each step)
+ * start_fclu = 0 means to search from the max. value
+ *
+ * Buckets are scanned downward; fclu_hint caps the starting point and
+ * is refreshed only when the search covered the full range (so the hint
+ * never over-promises).
+ */
+AU_INFO_T *amap_pop_cold_au_largest(AMAP_T *amap, uint16_t start_fclu)
+{
+ AU_INFO_T *au = NULL;
+ FCLU_NODE_T *fclu_iter;
+
+ if (!start_fclu)
+ start_fclu = amap->clusters_per_au;
+ if (start_fclu > amap->clusters_per_au)
+ start_fclu = amap->clusters_per_au;
+
+ /* Use hint (search start point) */
+ if (amap->fclu_hint < start_fclu)
+ fclu_iter = NODE(amap->fclu_hint, amap);
+ else
+ fclu_iter = NODE(start_fclu, amap);
+
+ /* Naive Hash management */
+ do {
+ if (!list_empty(&fclu_iter->head)) {
+ struct list_head *first = fclu_iter->head.next;
+
+ au = list_entry(first, AU_INFO_T, head);
+ // BUG_ON((au < amap->entries) || ((amap->entries + amap->n_au) <= au));
+
+ amap_list_del(first);
+
+ // (Hint) Possible maximum value of free clusters (among cold)
+ /* if it wasn't the whole search, don't update fclu_hint */
+ if (start_fclu == amap->clusters_per_au)
+ amap->fclu_hint = au->free_clusters;
+
+ break;
+ }
+
+ fclu_iter--;
+ } while (amap->fclu_nodes <= fclu_iter);
+
+ return au;
+}
+
+
+
+/*
+ * ===============================================
+ * Allocation Map related functions
+ * ===============================================
+ */
+
+/* Create AMAP related data structure (mount time).
+ * Validates FAT32 / size / alignment preconditions, allocates the AU
+ * table and per-free-cluster buckets, scans the whole FAT to count free
+ * clusters per AU, and classifies AUs as hot (last SMART_ALLOC_N_HOT_AU
+ * plus the root-dir AU) or cold.
+ * Returns 0, or -EEXIST / -ENOTSUPP / -EINVAL / -ENOMEM / -EIO.
+ */
+int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hidden_sect)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ AMAP_T *amap;
+ int total_used_clusters;
+ int n_au_table = 0;
+ int i, i_clu, i_au;
+ int i_au_root = -1, i_au_hot_from = INT_MAX;
+ u32 misaligned_sect = hidden_sect;
+ u64 tmp;
+
+ BUG_ON(!fsi->bd_opened);
+
+ if (fsi->amap)
+ return -EEXIST;
+
+ /* Check conditions */
+ if (fsi->vol_type != FAT32) {
+ sdfat_msg(sb, KERN_ERR, "smart allocation is only available "
+ "with fat32-fs");
+ return -ENOTSUPP;
+ }
+
+ if (fsi->num_sectors < AMAP_MIN_SUPPORT_SECTORS) {
+ sdfat_msg(sb, KERN_ERR, "smart allocation is only available "
+ "with sectors above %d", AMAP_MIN_SUPPORT_SECTORS);
+ return -ENOTSUPP;
+ }
+
+ /* AU size must be a multiple of clu_size */
+ if ((sect_per_au <= 0) || (sect_per_au & (fsi->sect_per_clus - 1))) {
+ sdfat_msg(sb, KERN_ERR,
+ "invalid AU size (sect_per_au : %u, "
+ "sect_per_clus : %u) "
+ "please re-format for performance.",
+ sect_per_au, fsi->sect_per_clus);
+ return -EINVAL;
+ }
+
+ /* the start sector of this partition must be a multiple of clu_size */
+ if (misaligned_sect & (fsi->sect_per_clus - 1)) {
+ sdfat_msg(sb, KERN_ERR,
+ "misaligned part (start sect : %u, "
+ "sect_per_clus : %u) "
+ "please re-format for performance.",
+ misaligned_sect, fsi->sect_per_clus);
+ return -EINVAL;
+ }
+
+ /* data start sector must be a multiple of clu_size */
+ if (fsi->data_start_sector & (fsi->sect_per_clus - 1)) {
+ sdfat_msg(sb, KERN_ERR,
+ "misaligned data area (start sect : %llu, "
+ "sect_per_clus : %u) "
+ "please re-format for performance.",
+ fsi->data_start_sector, fsi->sect_per_clus);
+ return -EINVAL;
+ }
+
+ misaligned_sect &= (sect_per_au - 1);
+
+ /* Allocate data structures */
+ amap = kzalloc(sizeof(AMAP_T), GFP_NOIO);
+ if (!amap)
+ return -ENOMEM;
+
+ amap->sb = sb;
+
+ /* n_au = ceil((num_sectors + misalignment) / sect_per_au) */
+ tmp = fsi->num_sectors + misaligned_sect + sect_per_au - 1;
+ do_div(tmp, sect_per_au);
+ amap->n_au = tmp;
+ amap->n_clean_au = 0;
+ amap->n_full_au = 0;
+
+ /* Reflect block-partition align first,
+ * then partition-data_start align
+ */
+ amap->clu_align_bias = (misaligned_sect / fsi->sect_per_clus);
+ amap->clu_align_bias += (fsi->data_start_sector >> fsi->sect_per_clus_bits) - CLUS_BASE;
+ amap->clusters_per_au = sect_per_au / fsi->sect_per_clus;
+
+ /* That is,
+ * the size of cluster is at least 4KB if the size of AU is 4MB
+ */
+ if (amap->clusters_per_au > MAX_CLU_PER_AU) {
+ sdfat_log_msg(sb, KERN_INFO,
+ "too many clusters per AU (clus/au:%d > %d).",
+ amap->clusters_per_au,
+ MAX_CLU_PER_AU);
+ }
+
+ /* is it needed? why here? */
+ // set_sb_dirty(sb);
+
+ spin_lock_init(&amap->amap_lock);
+
+ amap->option.packing_ratio = pack_ratio;
+ amap->option.au_size = sect_per_au;
+ amap->option.au_align_factor = hidden_sect;
+
+
+ /* Allocate AU info table */
+ n_au_table = (amap->n_au + N_AU_PER_TABLE - 1) / N_AU_PER_TABLE;
+ amap->au_table = kmalloc(sizeof(AU_INFO_T *) * n_au_table, GFP_NOIO);
+ if (!amap->au_table) {
+ sdfat_msg(sb, KERN_ERR,
+ "failed to alloc amap->au_table\n");
+ kfree(amap);
+ return -ENOMEM;
+ }
+
+ /* NOTE(review): get_zeroed_page()/vzalloc() results below are not
+ * NULL-checked; an OOM here would lead to a NULL dereference later.
+ * Confirm intended or add checks.
+ */
+ for (i = 0; i < n_au_table; i++)
+ amap->au_table[i] = (AU_INFO_T *)get_zeroed_page(GFP_NOIO);
+
+ /* Allocate buckets indexed by # of free clusters */
+ amap->fclu_order = get_order(sizeof(FCLU_NODE_T) * amap->clusters_per_au);
+
+ // XXX: amap->clusters_per_au limitation is 512 (w/ 8 byte list_head)
+ sdfat_log_msg(sb, KERN_INFO, "page orders for AU nodes : %d "
+ "(clus_per_au : %d, node_size : %lu)",
+ amap->fclu_order,
+ amap->clusters_per_au,
+ (unsigned long)sizeof(FCLU_NODE_T));
+
+ if (!amap->fclu_order)
+ amap->fclu_nodes = (FCLU_NODE_T *)get_zeroed_page(GFP_NOIO);
+ else
+ amap->fclu_nodes = vzalloc(PAGE_SIZE << amap->fclu_order);
+
+ amap->fclu_hint = amap->clusters_per_au;
+
+ /* Hot AU list, ignored AU list */
+ amap->slist_hot.next = NULL;
+ amap->slist_hot.head = &amap->slist_hot;
+ amap->total_fclu_hot = 0;
+
+ amap->slist_ignored.next = NULL;
+ amap->slist_ignored.head = &amap->slist_ignored;
+
+ /* Strategy related vars. */
+ amap->cur_cold.au = NULL;
+ amap->cur_hot.au = NULL;
+ amap->n_need_packing = 0;
+
+
+ /* Build AMAP info */
+ total_used_clusters = 0; // Count # of used clusters
+
+ i_au_root = i_AU_of_CLU(amap, fsi->root_dir);
+ i_au_hot_from = amap->n_au - (SMART_ALLOC_N_HOT_AU - 1);
+
+ for (i = 0; i < amap->clusters_per_au; i++)
+ INIT_LIST_HEAD(&amap->fclu_nodes[i].head);
+
+ /*
+ * Thanks to kzalloc()
+ * amap->entries[i_au].free_clusters = 0;
+ * amap->entries[i_au].head.prev = NULL;
+ * amap->entries[i_au].head.next = NULL;
+ */
+
+ /* Parse FAT table */
+ for (i_clu = CLUS_BASE; i_clu < fsi->num_clusters; i_clu++) {
+ u32 clu_data;
+ AU_INFO_T *au;
+
+ if (fat_ent_get(sb, i_clu, &clu_data)) {
+ sdfat_msg(sb, KERN_ERR,
+ "failed to read fat entry(%u)\n", i_clu);
+ goto free_and_eio;
+ }
+
+ if (IS_CLUS_FREE(clu_data)) {
+ au = GET_AU(amap, i_AU_of_CLU(amap, i_clu));
+ au->free_clusters++;
+ } else
+ total_used_clusters++;
+ }
+
+ /* Build AU list */
+ for (i_au = 0; i_au < amap->n_au; i_au++) {
+ AU_INFO_T *au = GET_AU(amap, i_au);
+
+ au->idx = i_au;
+ BUG_ON(au->free_clusters > amap->clusters_per_au);
+
+ if (au->free_clusters == amap->clusters_per_au)
+ amap->n_clean_au++;
+ else if (au->free_clusters == 0)
+ amap->n_full_au++;
+
+ /* If hot, insert to the hot list */
+ if (i_au >= i_au_hot_from) {
+ amap_add_hot_au(amap, au);
+ amap->total_fclu_hot += au->free_clusters;
+ } else if (i_au != i_au_root || SMART_ALLOC_N_HOT_AU == 0) {
+ /* Otherwise, insert to the free cluster hash */
+ amap_add_cold_au(amap, au);
+ }
+ }
+
+ /* Hot list -> (root) -> (last) -> (last - 1) -> ... */
+ if (i_au_root >= 0 && SMART_ALLOC_N_HOT_AU > 0) {
+ amap_add_hot_au(amap, GET_AU(amap, i_au_root));
+ amap->total_fclu_hot += GET_AU(amap, i_au_root)->free_clusters;
+ }
+
+ fsi->amap = amap;
+ fsi->used_clusters = total_used_clusters;
+
+ sdfat_msg(sb, KERN_INFO,
+ "AMAP: Smart allocation enabled (opt : %u / %u / %u)",
+ amap->option.au_size, amap->option.au_align_factor,
+ amap->option.packing_ratio);
+
+ /* Debug purpose - check */
+ //{
+ //u32 used_clusters;
+ //fat_count_used_clusters(sb, &used_clusters)
+ //ASSERT(used_clusters == total_used_clusters);
+ //}
+
+ return 0;
+
+
+free_and_eio:
+ if (amap) {
+ if (amap->au_table) {
+ for (i = 0; i < n_au_table; i++)
+ free_page((unsigned long)amap->au_table[i]);
+ kfree(amap->au_table);
+ }
+ if (amap->fclu_nodes) {
+ if (!amap->fclu_order)
+ free_page((unsigned long)amap->fclu_nodes);
+ else
+ vfree(amap->fclu_nodes);
+ }
+ kfree(amap);
+ }
+ return -EIO;
+}
+
+
+/* Free AMAP related structure (umount time).
+ * Releases the AU table pages, the free-cluster buckets (page or
+ * vmalloc'd, mirroring the choice made in amap_create()), and the AMAP
+ * itself; then clears fsi->amap. Safe to call when amap was never set.
+ */
+void amap_destroy(struct super_block *sb)
+{
+ AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+ int n_au_table;
+
+ if (!amap)
+ return;
+
+ DMSG("%s\n", __func__);
+
+ n_au_table = (amap->n_au + N_AU_PER_TABLE - 1) / N_AU_PER_TABLE;
+
+ if (amap->au_table) {
+ int i;
+
+ for (i = 0; i < n_au_table; i++)
+ free_page((unsigned long)amap->au_table[i]);
+
+ kfree(amap->au_table);
+ }
+ if (!amap->fclu_order)
+ free_page((unsigned long)amap->fclu_nodes);
+ else
+ vfree(amap->fclu_nodes);
+ kfree(amap);
+ SDFAT_SB(sb)->fsi.amap = NULL;
+}
+
+
+/*
+ * Check status of FS
+ * and change destination if needed to disable AU-aligned alloc.
+ * (from ALLOC_COLD_ALIGNED to ALLOC_COLD_SEQ)
+ * NOTE(review): currently unused — the call site in amap_get_target_au()
+ * is commented out ("Experimental").
+ */
+static inline int amap_update_dest(AMAP_T *amap, int ori_dest)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(amap->sb)->fsi);
+ int n_partial_au, n_partial_freeclus;
+
+ if (ori_dest != ALLOC_COLD_ALIGNED)
+ return ori_dest;
+
+ /* # of partial AUs and # of clusters in those AUs */
+ n_partial_au = amap->n_au - amap->n_clean_au - amap->n_full_au;
+ n_partial_freeclus = fsi->num_clusters - fsi->used_clusters -
+ amap->clusters_per_au * amap->n_clean_au;
+
+ /* Status of AUs : Full / Partial / Clean
+ * If there are many partial (and badly fragmented) AUs,
+ * the throughput will decrease extremely.
+ *
+ * The following code will treat those worst cases.
+ */
+
+ /* XXX: AMAP heuristics */
+ if ((amap->n_clean_au * 50 <= amap->n_au) &&
+ (n_partial_freeclus*2) < (n_partial_au*amap->clusters_per_au)) {
+ /* If clean AUs are fewer than 2% of n_au (80 AUs per 16GB)
+ * and fragment ratio is more than 2 (AVG free_clusters=half AU)
+ *
+ * disable clean-first allocation
+ * enable VFAT-like sequential allocation
+ */
+ return ALLOC_COLD_SEQ;
+ }
+
+ return ori_dest;
+}
+
+
+#define PACKING_SOFTLIMIT (amap->option.packing_ratio)
+#define PACKING_HARDLIMIT (amap->option.packing_ratio * 4)
+/*
+ * Pick a packing AU if needed.
+ * Otherwise just return NULL
+ *
+ * This function includes some heuristics.
+ * The returned AU (if any) has been removed from the cold map; the
+ * caller owns it. *clu_to_skip may be set to reposition the allocation
+ * start inside the chosen AU.
+ */
+static inline AU_INFO_T *amap_get_packing_au(AMAP_T *amap, int dest, int num_to_wb, int *clu_to_skip)
+{
+ AU_INFO_T *au = NULL;
+
+ if (dest == ALLOC_COLD_PACKING) {
+ /* ALLOC_COLD_PACKING:
+ * Packing-first mode for defrag.
+ * Optimized to save clean AU
+ *
+ * 1) best-fit AU
+ * 2) Smallest AU (w/ minimum free clusters)
+ */
+ if (num_to_wb >= amap->clusters_per_au)
+ num_to_wb = num_to_wb % amap->clusters_per_au;
+
+ /* If this were commented out: an exact AU-size request would get
+ * a clean AU, and everything else the smallest AU first.
+ */
+ if (num_to_wb == 0)
+ num_to_wb = 1; // Don't use clean AUs
+
+ au = amap_find_cold_au_bestfit(amap, num_to_wb);
+ if (au && au->free_clusters == amap->clusters_per_au && num_to_wb > 1) {
+ /* if au is clean then get a new partial one */
+ au = amap_find_cold_au_bestfit(amap, 1);
+ }
+
+ if (au) {
+ amap->n_need_packing = 0;
+ amap_remove_cold_au(amap, au);
+ return au;
+ }
+ }
+
+
+ /* Heuristic packing:
+ * This will improve QoS greatly.
+ *
+ * Count # of AU_ALIGNED allocation.
+ * If the number exceeds the specific threshold,
+ * allocate on a partial AU or generate random I/O.
+ */
+ if ((PACKING_SOFTLIMIT > 0) &&
+ (amap->n_need_packing >= PACKING_SOFTLIMIT) &&
+ (num_to_wb < (int)amap->clusters_per_au)) {
+ /* Best-fit packing:
+ * If num_to_wb (expected number to be allocated) is smaller
+ * than AU_SIZE, find a best-fit AU.
+ */
+
+ /* Back margin (heuristics) */
+ if (num_to_wb < amap->clusters_per_au / 4)
+ num_to_wb = amap->clusters_per_au / 4;
+
+ au = amap_find_cold_au_bestfit(amap, num_to_wb);
+ if (au != NULL) {
+ amap_remove_cold_au(amap, au);
+
+ MMSG("AMAP: packing (cnt: %d) / softlimit, "
+ "best-fit (num_to_wb: %d))\n",
+ amap->n_need_packing, num_to_wb);
+
+ if (au->free_clusters > num_to_wb) { // Best-fit search: this branch always hits
+ *clu_to_skip = au->free_clusters - num_to_wb;
+ /* otherwise don't skip */
+ }
+ amap->n_need_packing = 0;
+ return au;
+ }
+ }
+
+ if ((PACKING_HARDLIMIT != 0) &&
+ amap->n_need_packing >= PACKING_HARDLIMIT) {
+ /* Compulsory SLC flushing:
+ * If there was no chance to do best-fit packing
+ * and the # of AU-aligned allocation exceeds HARD threshold,
+ * then pick a clean AU and generate a compulsory random I/O.
+ */
+ au = amap_pop_cold_au_largest(amap, amap->clusters_per_au);
+ if (au) {
+ MMSG("AMAP: packing (cnt: %d) / hard-limit, largest)\n",
+ amap->n_need_packing);
+
+ if (au->free_clusters >= 96) {
+ *clu_to_skip = au->free_clusters / 2;
+ MMSG("AMAP: cluster idx re-position\n");
+ }
+ amap->n_need_packing = 0;
+ return au;
+ }
+ }
+
+ /* Update # of clean AU allocation */
+ amap->n_need_packing++;
+ return NULL;
+}
+
+
+/* Pick a target AU:
+ * This function should be called
+ * only if there are one or more free clusters in the bdev.
+ *
+ * Returns a TARGET_AU_T (cur_hot or cur_cold) with a usable AU, or NULL
+ * only when just ignored AUs remain. Falls back HOT<->COLD via retry;
+ * the loop counter bounds the fallback to two destination switches.
+ */
+TARGET_AU_T *amap_get_target_au(AMAP_T *amap, int dest, int num_to_wb)
+{
+ int loop_count = 0;
+
+retry:
+ if (++loop_count >= 3) {
+ /* No space available (or AMAP consistency error)
+ * This could happen because of the ignored AUs but not likely
+ * (because the defrag daemon will not work if there is no enough space)
+ */
+ BUG_ON(amap->slist_ignored.next == NULL);
+ return NULL;
+ }
+
+ /* Hot clusters (DIR) */
+ if (dest == ALLOC_HOT) {
+
+ /* Working hot AU exist? */
+ if (amap->cur_hot.au == NULL || amap->cur_hot.au->free_clusters == 0) {
+ AU_INFO_T *au;
+
+ if (amap->total_fclu_hot == 0) {
+ /* No more hot AU available */
+ dest = ALLOC_COLD;
+
+ goto retry;
+ }
+
+ au = amap_find_hot_au_partial(amap);
+
+ BUG_ON(au == NULL);
+ BUG_ON(au->free_clusters <= 0);
+
+ amap->cur_hot.au = au;
+ amap->cur_hot.idx = 0;
+ amap->cur_hot.clu_to_skip = 0;
+ }
+
+ /* Now allocate on a hot AU */
+ return &amap->cur_hot;
+ }
+
+ /* Cold allocation:
+ * If amap->cur_cold.au has one or more free cluster(s),
+ * then just return amap->cur_cold
+ */
+ if ((!amap->cur_cold.au)
+ || (amap->cur_cold.idx == amap->clusters_per_au)
+ || (amap->cur_cold.au->free_clusters == 0)) {
+
+ AU_INFO_T *au = NULL;
+ const AU_INFO_T *old_au = amap->cur_cold.au;
+ int n_clu_to_skip = 0;
+
+ if (old_au) {
+ ASSERT(!IS_AU_WORKING(old_au, amap));
+ /* must be NOT WORKING AU.
+ * (only for information gathering)
+ */
+ }
+
+ /* Next target AU is needed:
+ * There are 3 possible ALLOC options for cold AU
+ *
+ * ALLOC_COLD_ALIGNED: Clean AU first, but heuristic packing is ON
+ * ALLOC_COLD_PACKING: Packing AU first (usually for defrag)
+ * ALLOC_COLD_SEQ : Sequential AU allocation (VFAT-like)
+ */
+
+ /* Experimental: Modify allocation destination if needed (ALIGNED => SEQ) */
+ // dest = amap_update_dest(amap, dest);
+
+ if ((dest == ALLOC_COLD_SEQ) && old_au) {
+ int i_au = old_au->idx + 1;
+
+ /* circular scan over all AUs starting after the old one */
+ while (i_au != old_au->idx) {
+ au = GET_AU(amap, i_au);
+
+ if ((au->free_clusters > 0) &&
+ !IS_AU_HOT(au, amap) &&
+ !IS_AU_IGNORED(au, amap)) {
+ MMSG("AMAP: new cold AU(%d) with %d "
+ "clusters (seq)\n",
+ au->idx, au->free_clusters);
+
+ amap_remove_cold_au(amap, au);
+ goto ret_new_cold;
+ }
+ i_au++;
+ if (i_au >= amap->n_au)
+ i_au = 0;
+ }
+
+ // no cold AUs are available => Hot allocation
+ dest = ALLOC_HOT;
+ goto retry;
+ }
+
+
+ /*
+ * Check if packing is needed
+ * (ALLOC_COLD_PACKING is treated by this function)
+ */
+ au = amap_get_packing_au(amap, dest, num_to_wb, &n_clu_to_skip);
+ if (au) {
+ MMSG("AMAP: new cold AU(%d) with %d clusters "
+ "(packing)\n", au->idx, au->free_clusters);
+ goto ret_new_cold;
+ }
+
+ /* ALLOC_COLD_ALIGNED */
+ /* Check if the adjacent AU is clean */
+ if (old_au && ((old_au->idx + 1) < amap->n_au)) {
+ au = GET_AU(amap, old_au->idx + 1);
+ if ((au->free_clusters == amap->clusters_per_au) &&
+ !IS_AU_HOT(au, amap) &&
+ !IS_AU_IGNORED(au, amap)) {
+ MMSG("AMAP: new cold AU(%d) with %d clusters "
+ "(adjacent)\n", au->idx, au->free_clusters);
+ amap_remove_cold_au(amap, au);
+ goto ret_new_cold;
+ }
+ }
+
+ /* Clean or largest AU */
+ au = amap_pop_cold_au_largest(amap, 0);
+ if (!au) {
+ //ASSERT(amap->total_fclu_hot == (fsi->num_clusters - fsi->used_clusters - 2));
+ dest = ALLOC_HOT;
+ goto retry;
+ }
+
+ MMSG("AMAP: New cold AU (%d) with %d clusters\n",
+ au->idx, au->free_clusters);
+
+ret_new_cold:
+ SET_AU_WORKING(au);
+
+ amap->cur_cold.au = au;
+ amap->cur_cold.idx = 0;
+ amap->cur_cold.clu_to_skip = n_clu_to_skip;
+ }
+
+ return &amap->cur_cold;
+}
+
+/* Put and update target AU.
+ * Called after an allocation pass: refreshes the clean/full AU counters,
+ * the hot free-cluster total, and re-inserts an exhausted cold AU back
+ * into the cold map (clearing its WORKING mark).
+ */
+void amap_put_target_au(AMAP_T *amap, TARGET_AU_T *cur, unsigned int num_allocated)
+{
+ /* Update AMAP info vars. */
+ if (num_allocated > 0 &&
+ (cur->au->free_clusters + num_allocated) == amap->clusters_per_au) {
+ /* if the target AU was a clean AU before this allocation ... */
+ amap->n_clean_au--;
+ }
+ if (num_allocated > 0 &&
+ cur->au->free_clusters == 0)
+ amap->n_full_au++;
+
+ if (IS_AU_HOT(cur->au, amap)) {
+ /* Hot AU */
+ MMSG("AMAP: hot allocation at AU %d\n", cur->au->idx);
+ amap->total_fclu_hot -= num_allocated;
+
+ /* Intra-AU round-robin */
+ if (cur->idx >= amap->clusters_per_au)
+ cur->idx = 0;
+
+ /* No more space available */
+ if (cur->au->free_clusters == 0)
+ cur->au = NULL;
+
+ } else {
+ /* non-hot AU */
+ ASSERT(IS_AU_WORKING(cur->au, amap));
+
+ if (cur->idx >= amap->clusters_per_au || cur->au->free_clusters == 0) {
+ /* It should be inserted back to AU MAP */
+ cur->au->shead.head = NULL; // SET_AU_NOT_WORKING
+ amap_add_cold_au(amap, cur->au);
+
+ // cur->au = NULL; // This value will be used for the next AU selection
+ cur->idx = amap->clusters_per_au; // AU closing
+ }
+ }
+
+}
+
+
+/* Reposition target->idx for packing (Heuristics):
+ * Skip (num_to_skip) free clusters in (cur->au)
+ * by walking the FAT; occupied clusters do not count toward the skip.
+ * Returns 0 on success, -EIO on FAT read error or inconsistent state.
+ */
+static inline int amap_skip_cluster(struct super_block *sb, TARGET_AU_T *cur, int num_to_skip)
+{
+ AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+ u32 clu, read_clu;
+ MMSG_VAR(int num_to_skip_orig = num_to_skip);
+
+ if (num_to_skip >= cur->au->free_clusters) {
+ EMSG("AMAP(%s): skip mis-use. amap_566\n", __func__);
+ return -EIO;
+ }
+
+ clu = CLU_of_i_AU(amap, cur->au->idx, cur->idx);
+ while (num_to_skip > 0) {
+ if (clu >= CLUS_BASE) {
+ /* Cf.
+ * If AMAP's integrity is okay,
+ * we don't need to check if (clu < fsi->num_clusters)
+ */
+
+ if (fat_ent_get(sb, clu, &read_clu))
+ return -EIO;
+
+ if (IS_CLUS_FREE(read_clu))
+ num_to_skip--;
+ }
+
+ // Move clu->idx
+ clu++;
+ (cur->idx)++;
+
+ if (cur->idx >= amap->clusters_per_au) {
+ /* End of AU (Not supposed) */
+ EMSG("AMAP: Skip - End of AU?! (amap_596)\n");
+ cur->idx = 0;
+ return -EIO;
+ }
+ }
+
+ MMSG("AMAP: Skip_clusters (%d skipped => %d, among %d free clus)\n",
+ num_to_skip_orig, cur->idx, cur->au->free_clusters);
+
+ return 0;
+}
+
+
+/* AMAP-based allocation function for FAT32.
+ * Allocates @num_alloc clusters into chain @p_chain according to @dest
+ * (hot/cold/seq/packing strategy). Repeats over AUs until satisfied.
+ * Returns 0 on success; -ENOSPC / -EIO on failure (partially allocated
+ * clusters are freed on the error path).
+ */
+s32 amap_fat_alloc_cluster(struct super_block *sb, u32 num_alloc, CHAIN_T *p_chain, s32 dest)
+{
+ AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+ TARGET_AU_T *cur = NULL;
+ AU_INFO_T *target_au = NULL; /* Allocation target AU */
+ s32 ret = -ENOSPC;
+ u32 last_clu = CLUS_EOF, read_clu;
+ u32 new_clu, total_cnt;
+ u32 num_allocated = 0, num_allocated_each = 0;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ BUG_ON(!amap);
+ BUG_ON(IS_CLUS_EOF(fsi->used_clusters));
+
+ total_cnt = fsi->num_clusters - CLUS_BASE;
+
+ if (unlikely(total_cnt < fsi->used_clusters)) {
+ sdfat_fs_error_ratelimit(sb,
+ "AMAP(%s): invalid used clusters(t:%u,u:%u)\n",
+ __func__, total_cnt, fsi->used_clusters);
+ return -EIO;
+ }
+
+ if (num_alloc > total_cnt - fsi->used_clusters)
+ return -ENOSPC;
+
+ p_chain->dir = CLUS_EOF;
+
+ set_sb_dirty(sb);
+
+ /* NOTE(review): amap_lock taking is commented out here and below —
+ * presumably the caller holds an outer lock; confirm.
+ */
+ // spin_lock(&amap->amap_lock);
+
+retry_alloc:
+ /* Allocation strategy implemented */
+ cur = amap_get_target_au(amap, dest, fsi->reserved_clusters);
+ if (unlikely(!cur)) {
+ // There is no available AU (only ignored-AU are left)
+ sdfat_msg(sb, KERN_ERR, "AMAP Allocator: no avaialble AU.");
+ goto error;
+ }
+
+ /* If there are clusters to skip */
+ if (cur->clu_to_skip > 0) {
+ if (amap_skip_cluster(sb, &amap->cur_cold, cur->clu_to_skip)) {
+ ret = -EIO;
+ goto error;
+ }
+ cur->clu_to_skip = 0;
+ }
+
+ target_au = cur->au;
+
+ /*
+ * cur->au : target AU info pointer
+ * cur->idx : the intra-cluster idx in the AU to start from
+ */
+ BUG_ON(!cur->au);
+ BUG_ON(!cur->au->free_clusters);
+ BUG_ON(cur->idx >= amap->clusters_per_au);
+
+ num_allocated_each = 0;
+ new_clu = CLU_of_i_AU(amap, target_au->idx, cur->idx);
+
+ do {
+ /* Allocate at the target AU */
+ if ((new_clu >= CLUS_BASE) && (new_clu < fsi->num_clusters)) {
+ if (fat_ent_get(sb, new_clu, &read_clu)) {
+ // spin_unlock(&amap->amap_lock);
+ ret = -EIO;
+ goto error;
+ }
+
+ if (IS_CLUS_FREE(read_clu)) {
+ BUG_ON(GET_AU(amap, i_AU_of_CLU(amap, new_clu)) != target_au);
+
+ /* Free cluster found */
+ if (fat_ent_set(sb, new_clu, CLUS_EOF)) {
+ ret = -EIO;
+ goto error;
+ }
+
+ num_allocated_each++;
+
+ /* link into the chain (head or tail append) */
+ if (IS_CLUS_EOF(p_chain->dir)) {
+ p_chain->dir = new_clu;
+ } else {
+ if (fat_ent_set(sb, last_clu, new_clu)) {
+ ret = -EIO;
+ goto error;
+ }
+ }
+ last_clu = new_clu;
+
+ /* Update au info */
+ target_au->free_clusters--;
+ }
+
+ }
+
+ new_clu++;
+ (cur->idx)++;
+
+ /* End of the AU */
+ if ((cur->idx >= amap->clusters_per_au) || !(target_au->free_clusters))
+ break;
+ } while (num_allocated_each < num_alloc);
+
+ /* Update strategy info */
+ amap_put_target_au(amap, cur, num_allocated_each);
+
+
+ num_allocated += num_allocated_each;
+ fsi->used_clusters += num_allocated_each;
+ num_alloc -= num_allocated_each;
+
+
+ if (num_alloc > 0)
+ goto retry_alloc;
+
+ // spin_unlock(&amap->amap_lock);
+ return 0;
+error:
+ if (num_allocated)
+ fsi->fs_func->free_cluster(sb, p_chain, 0);
+ return ret;
+}
+
+
+/* Free cluster for FAT32 (not implemented yet) — stub kept so the
+ * fs_func table has a symbol; always returns -ENOTSUPP.
+ */
+s32 amap_free_cluster(struct super_block *sb, CHAIN_T *p_chain, s32 do_relse)
+{
+ return -ENOTSUPP;
+}
+
+
+/*
+ * This is called by fat_free_cluster()
+ * to update AMAP info.
+ * Increments the free count of the AU owning @clu, keeping the hot
+ * total / cold map / clean-full counters consistent.
+ * Returns 0, or -EIO if the AU already looks fully free (corruption).
+ */
+s32 amap_release_cluster(struct super_block *sb, u32 clu)
+{
+ AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+ AU_INFO_T *au;
+ int i_au;
+
+ // spin_lock(&amap->amap_lock);
+
+ /* Update AU info */
+ i_au = i_AU_of_CLU(amap, clu);
+ BUG_ON(i_au >= amap->n_au);
+ au = GET_AU(amap, i_au);
+ if (au->free_clusters >= amap->clusters_per_au) {
+ sdfat_fs_error(sb, "%s, au->free_clusters(%hd) is "
+ "greater than or equal to amap->clusters_per_au(%hd)",
+ __func__, au->free_clusters, amap->clusters_per_au);
+ return -EIO;
+ }
+
+ if (IS_AU_HOT(au, amap)) {
+ MMSG("AMAP: Hot cluster freed\n");
+ au->free_clusters++;
+ amap->total_fclu_hot++;
+ } else if (!IS_AU_WORKING(au, amap) && !IS_AU_IGNORED(au, amap)) {
+ /* Ordinary AU - update AU tree */
+ // Can be optimized by implementing amap_update_au
+ amap_remove_cold_au(amap, au);
+ au->free_clusters++;
+ amap_add_cold_au(amap, au);
+ } else
+ au->free_clusters++;
+
+
+ /* Update AMAP info */
+ if (au->free_clusters == amap->clusters_per_au)
+ amap->n_clean_au++;
+ if (au->free_clusters == 1)
+ amap->n_full_au--;
+
+ // spin_unlock(&amap->amap_lock);
+ return 0;
+}
+
+
+/*
+ * Tell whether cluster 'clu' lies inside a "working" AU
+ * (one the allocator is currently drawing from).
+ * The caller should hold sb lock.
+ * This func. should be used only if smart allocation is on.
+ */
+s32 amap_check_working(struct super_block *sb, u32 clu)
+{
+	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+
+	BUG_ON(!amap);
+	return IS_AU_WORKING(GET_AU(amap, i_AU_of_CLU(amap, clu)), amap);
+}
+
+
+/*
+ * Report the number of free clusters in the AU that contains 'clu'.
+ */
+s32 amap_get_freeclus(struct super_block *sb, u32 clu)
+{
+	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+
+	BUG_ON(!amap);
+	return (s32)(GET_AU(amap, i_AU_of_CLU(amap, clu))->free_clusters);
+}
+
+
+/*
+ * Add the AU containing 'clu' to the ignored AU list so that
+ * the allocator will no longer hand out clusters from it.
+ * Returns -EPERM for a hot AU, -EBUSY for a working AU,
+ * 0 when the AU is (or already was) ignored.
+ *
+ * XXX: Ignored counter needed
+ */
+s32 amap_mark_ignore(struct super_block *sb, u32 clu)
+{
+	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+	AU_INFO_T *au;
+
+	BUG_ON(!amap);
+	au = GET_AU(amap, i_AU_of_CLU(amap, clu));
+
+	/* Doesn't work with hot AUs */
+	if (IS_AU_HOT(au, amap))
+		return -EPERM;
+
+	/* an AU the allocator is using cannot be taken away */
+	if (IS_AU_WORKING(au, amap))
+		return -EBUSY;
+
+	//BUG_ON(IS_AU_IGNORED(au, amap) && (GET_IGN_CNT(au) == 0));
+	if (IS_AU_IGNORED(au, amap))
+		return 0;
+
+	/* unlink from the cold pool, park on the ignored list */
+	amap_remove_cold_au(amap, au);
+	amap_insert_to_list(au, &amap->slist_ignored);
+
+	BUG_ON(!IS_AU_IGNORED(au, amap));
+
+	//INC_IGN_CNT(au);
+	MMSG("AMAP: Mark ignored AU (%d)\n", au->idx);
+	return 0;
+}
+
+
+/*
+ * Move an ignored AU (the one containing 'clu') back into the
+ * cold AU pool so the allocator may use it again.
+ * This function could be used only on IGNORED AUs.
+ * The caller should care whether it's ignored or not before using this func.
+ * Always returns 0.
+ */
+s32 amap_unmark_ignore(struct super_block *sb, u32 clu)
+{
+	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+	AU_INFO_T *au;
+
+	BUG_ON(!amap);
+
+	au = GET_AU(amap, i_AU_of_CLU(amap, clu));
+
+	/* misuse on a non-ignored AU is a bug, not a recoverable error */
+	BUG_ON(!IS_AU_IGNORED(au, amap));
+	// BUG_ON(GET_IGN_CNT(au) == 0);
+
+	amap_remove_from_list(au, &amap->slist_ignored);
+	amap_add_cold_au(amap, au);
+
+	BUG_ON(IS_AU_IGNORED(au, amap));
+
+	//DEC_IGN_CNT(au);
+
+	MMSG("AMAP: Unmark ignored AU (%d)\n", au->idx);
+
+	return 0;
+}
+
+/*
+ * Unmark every ignored AU, returning each to the cold pool.
+ * Returns the number of AUs unmarked.
+ */
+s32 amap_unmark_ignore_all(struct super_block *sb)
+{
+	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+	struct slist_head *entry;
+	AU_INFO_T *au;
+	int count = 0;
+
+	BUG_ON(!amap);
+
+	/* re-read the list head each pass: removing the front
+	 * element exposes the next one */
+	while ((entry = amap->slist_ignored.next) != NULL) {
+		au = list_entry(entry, AU_INFO_T, shead);
+
+		BUG_ON(au != GET_AU(amap, au->idx));
+		BUG_ON(!IS_AU_IGNORED(au, amap));
+
+		//CLEAR_IGN_CNT(au);
+		amap_remove_from_list(au, &amap->slist_ignored);
+		amap_add_cold_au(amap, au);
+
+		MMSG("AMAP: Unmark ignored AU (%d)\n", au->idx);
+		count++;
+	}
+
+	BUG_ON(amap->slist_ignored.next != NULL);
+	MMSG("AMAP: unmark_ignore_all, total %d AUs\n", count);
+
+	return count;
+}
+
+/**
+ * @fn amap_get_au_stat
+ * @brief report AUs status depending on mode
+ * @return requested count on success, 0 otherwise (incl. no AMAP)
+ * @param sb super block
+ * @param mode VOL_AU_STAT_TOTAL, VOL_AU_STAT_CLEAN or VOL_AU_STAT_FULL
+ */
+u32 amap_get_au_stat(struct super_block *sb, s32 mode)
+{
+	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+
+	if (!amap)
+		return 0;
+
+	switch (mode) {
+	case VOL_AU_STAT_TOTAL:
+		return amap->n_au;
+	case VOL_AU_STAT_CLEAN:
+		return amap->n_clean_au;
+	case VOL_AU_STAT_FULL:
+		return amap->n_full_au;
+	default:
+		return 0;
+	}
+}
+
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDFAT_AMAP_H
+#define _SDFAT_AMAP_H
+
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+/* AMAP Configuration Variable */
+#define SMART_ALLOC_N_HOT_AU (5)
+
+/* Allocating Destination (for smart allocator):
+ * moved to sdfat.h
+ */
+/*
+ * #define ALLOC_COLD_ALIGNED (1)
+ * #define ALLOC_COLD_PACKING (2)
+ * #define ALLOC_COLD_SEQ (4)
+ */
+
+/* Minimum sectors for support AMAP create */
+#define AMAP_MIN_SUPPORT_SECTORS (1048576)
+
+#define amap_add_hot_au(amap, au) amap_insert_to_list(au, &amap->slist_hot)
+
+/* singly linked list */
+struct slist_head {
+	struct slist_head *next;
+	struct slist_head *head;	/* list this node is on; compared by IS_AU_HOT/IS_AU_IGNORED/IS_AU_WORKING */
+};
+
+/* AU entry type */
+typedef struct __AU_INFO_T {
+	uint16_t idx;		/* the index of the AU (0, 1, 2, ... ) */
+	uint16_t free_clusters;	/* # of available cluster */
+	union {
+		/* an AU is on exactly one list at a time, so the
+		 * doubly- and singly-linked hooks can share storage */
+		struct list_head head;
+		struct slist_head shead;/* singly linked list head for hot list */
+	};
+} AU_INFO_T;
+
+
+/* Allocation Target AU: allocator cursor within one working AU */
+typedef struct __TARGET_AU_T {
+	AU_INFO_T *au;		/* Working AU */
+	uint16_t idx;		/* Intra-AU cluster index */
+	uint16_t clu_to_skip;	/* Clusters to skip */
+} TARGET_AU_T;
+
+
+/* AMAP free-clusters-based node: one bucket of AUs that share
+ * the same free-cluster count (see NODE()/FREE_CLUSTERS()) */
+typedef struct {
+	struct list_head head;		/* the list of AUs */
+} FCLU_NODE_T;
+
+
+/* AMAP options */
+typedef struct {
+	unsigned int packing_ratio;	/* Tunable packing ratio */
+	unsigned int au_size;		/* AU size in sectors */
+	unsigned int au_align_factor;	/* Hidden sectors % au_size */
+} AMAP_OPT_T;
+
+/* Top-level allocation-unit map state for one volume */
+typedef struct __AMAP_T {
+	spinlock_t amap_lock;		/* obsolete */
+	struct super_block *sb;
+
+	int n_au;			/* total # of AUs */
+	int n_clean_au, n_full_au;	/* fully-free / fully-used AU counts */
+	int clu_align_bias;
+	uint16_t clusters_per_au;
+	AU_INFO_T **au_table;		/* An array of AU_INFO entries */
+	AMAP_OPT_T option;
+
+	/* Size-based AU management pool (cold) */
+	FCLU_NODE_T *fclu_nodes;	/* An array of listheads */
+	int fclu_order;			/* Page order that fclu_nodes needs */
+	int fclu_hint;			/* maximum # of free clusters in an AU */
+
+	/* Hot AU list */
+	unsigned int total_fclu_hot;	/* Free clusters in hot list */
+	struct slist_head slist_hot;	/* Hot AU list */
+
+	/* Ignored AU list */
+	struct slist_head slist_ignored;
+
+	/* Allocator variables (keep 2 AUs at maximum) */
+	TARGET_AU_T cur_cold;
+	TARGET_AU_T cur_hot;
+	int n_need_packing;
+} AMAP_T;
+
+
+/* AU table */
+#define N_AU_PER_TABLE (int)(PAGE_SIZE / sizeof(AU_INFO_T))
+/* all macro parameters parenthesized to keep expansion safe
+ * with compound argument expressions */
+#define GET_AU(amap, i_AU) ((amap)->au_table[(i_AU) / N_AU_PER_TABLE] + ((i_AU) % N_AU_PER_TABLE))
+//#define MAX_CLU_PER_AU (int)(PAGE_SIZE / sizeof(FCLU_NODE_T))
+#define MAX_CLU_PER_AU (1024)
+
+/* Cold AU bucket <-> # of freeclusters */
+#define NODE_CLEAN(amap) (&(amap)->fclu_nodes[(amap)->clusters_per_au - 1])
+#define NODE(fclu, amap) (&(amap)->fclu_nodes[(fclu) - 1])
+#define FREE_CLUSTERS(node, amap) ((int)((node) - (amap)->fclu_nodes) + 1)
+
+/* AU status */
+#define MAGIC_WORKING ((struct slist_head *)0xFFFF5091)
+#define IS_AU_HOT(au, amap) ((au)->shead.head == &(amap)->slist_hot)
+#define IS_AU_IGNORED(au, amap) ((au)->->shead.head == &(amap)->slist_ignored)
+#define IS_AU_WORKING(au, amap) ((au)->shead.head == MAGIC_WORKING)
+#define SET_AU_WORKING(au) ((au)->shead.head = MAGIC_WORKING)
+
+/* AU <-> cluster */
+#define i_AU_of_CLU(amap, clu) (((amap)->clu_align_bias + (clu)) / (amap)->clusters_per_au)
+#define CLU_of_i_AU(amap, i_au, idx) \
+	((uint32_t)(i_au) * (uint32_t)(amap)->clusters_per_au + (idx) - (amap)->clu_align_bias)
+
+/*
+ * NOTE : AMAP internal functions are moved to core.h
+ */
+
+#endif /* _SDFAT_AMAP_H */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : sdfat_api.c */
+/* PURPOSE : sdFAT volume lock layer */
+/* */
+/************************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+
+#include "version.h"
+#include "config.h"
+
+#include "sdfat.h"
+#include "core.h"
+
+/*----------------------------------------------------------------------*/
+/* Internal structures */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* Constant & Macro Definitions */
+/*----------------------------------------------------------------------*/
+static DEFINE_MUTEX(_lock_core);
+
+/*----------------------------------------------------------------------*/
+/* Global Variable Definitions */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* Local Variable Definitions */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* Local Function Declarations */
+/*----------------------------------------------------------------------*/
+
+/*======================================================================*/
+/* Global Function Definitions */
+/* - All functions for global use have same return value format, */
+/* that is, 0 on success and minus error number on */
+/* various error condition. */
+/*======================================================================*/
+
+/*----------------------------------------------------------------------*/
+/* sdFAT Filesystem Init & Exit Functions */
+/*----------------------------------------------------------------------*/
+
+/* module-wide init: delegates to the core layer */
+s32 fsapi_init(void)
+{
+	return fscore_init();
+}
+
+/* module-wide teardown: delegates to the core layer */
+s32 fsapi_shutdown(void)
+{
+	return fscore_shutdown();
+}
+
+/*----------------------------------------------------------------------*/
+/* Volume Management Functions */
+/*----------------------------------------------------------------------*/
+
+/* mount the file system volume
+ * (initializes the meta cache first; tears it down again on failure)
+ */
+s32 fsapi_mount(struct super_block *sb)
+{
+	s32 err;
+
+	/* acquire the core lock for file system critical section */
+	mutex_lock(&_lock_core);
+
+	err = meta_cache_init(sb);
+	if (err)
+		goto out;
+
+	err = fscore_mount(sb);
+out:
+	if (err)
+		meta_cache_shutdown(sb);
+
+	/* release the core lock for file system critical section */
+	mutex_unlock(&_lock_core);
+
+	return err;
+}
+EXPORT_SYMBOL(fsapi_mount);
+
+/* unmount the file system volume
+ * (holds both the global core lock and the per-volume lock)
+ */
+s32 fsapi_umount(struct super_block *sb)
+{
+	s32 err;
+
+	/* acquire the core lock for file system critical section */
+	mutex_lock(&_lock_core);
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_umount(sb);
+	meta_cache_shutdown(sb);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+
+	/* release the core lock for file system critical section */
+	mutex_unlock(&_lock_core);
+
+	return err;
+}
+EXPORT_SYMBOL(fsapi_umount);
+
+/* get the information of a file system volume
+ * Fast path: if used_clusters is already cached, answer without
+ * taking the volume lock; otherwise fall back to fscore_statfs().
+ */
+s32 fsapi_statfs(struct super_block *sb, VOL_INFO_T *info)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+	/* check the validity of pointer parameters */
+	ASSERT(info);
+
+	/* (u32)~0 marks "not counted yet" */
+	if (fsi->used_clusters == (u32) ~0) {
+		s32 err;
+
+		mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+		err = fscore_statfs(sb, info);
+		mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+		return err;
+	}
+
+	info->FatType = fsi->vol_type;
+	info->ClusterSize = fsi->cluster_size;
+	info->NumClusters = fsi->num_clusters - 2; /* clu 0 & 1 */
+	info->UsedClusters = fsi->used_clusters + fsi->reserved_clusters;
+	info->FreeClusters = info->NumClusters - info->UsedClusters;
+
+	return 0;
+}
+EXPORT_SYMBOL(fsapi_statfs);
+
+/* synchronize a file system volume
+ * (volume lock held across fscore_sync_fs())
+ */
+s32 fsapi_sync_fs(struct super_block *sb, s32 do_sync)
+{
+	s32 err;
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_sync_fs(sb, do_sync);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_sync_fs);
+
+/* update the on-disk volume flags (volume lock held) */
+s32 fsapi_set_vol_flags(struct super_block *sb, u16 new_flag, s32 always_sync)
+{
+	s32 err;
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_set_vol_flags(sb, new_flag, always_sync);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_set_vol_flags);
+
+/*----------------------------------------------------------------------*/
+/* File Operation Functions */
+/*----------------------------------------------------------------------*/
+
+/* lookup a directory entry by path (volume lock held) */
+s32 fsapi_lookup(struct inode *inode, u8 *path, FILE_ID_T *fid)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(fid && path);
+
+	/* reject empty path names early */
+	if (unlikely(!strlen(path)))
+		return -EINVAL;
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_lookup(inode, path, fid);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_lookup);
+
+/* create a file (volume lock held across fscore_create()) */
+s32 fsapi_create(struct inode *inode, u8 *path, u8 mode, FILE_ID_T *fid)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(fid && path);
+
+	/* reject empty path names early */
+	if (unlikely(!strlen(path)))
+		return -EINVAL;
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_create(inode, path, mode, fid);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_create);
+
+/* read the target string of symlink (volume lock held);
+ * rcount, if non-NULL, receives the number of bytes read
+ */
+s32 fsapi_read_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count, u64 *rcount)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(fid && buffer);
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_read_link(inode, fid, buffer, count, rcount);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_read_link);
+
+/* write the target string of symlink (volume lock held);
+ * wcount, if non-NULL, receives the number of bytes written
+ */
+s32 fsapi_write_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count, u64 *wcount)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(fid && buffer);
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_write_link(inode, fid, buffer, count, wcount);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_write_link);
+
+/* resize the file length from old_size to new_size
+ * (volume lock held across fscore_truncate())
+ */
+s32 fsapi_truncate(struct inode *inode, u64 old_size, u64 new_size)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	TMSG("%s entered (inode %p size %llu)\n", __func__, inode, new_size);
+	err = fscore_truncate(inode, old_size, new_size);
+	/* "exited" spelling matches the other trace messages in this file */
+	TMSG("%s exited (%d)\n", __func__, err);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_truncate);
+
+/* rename or move an old file into a new file (volume lock held) */
+s32 fsapi_rename(struct inode *old_parent_inode, FILE_ID_T *fid,
+		struct inode *new_parent_inode, struct dentry *new_dentry)
+{
+	s32 err;
+	struct super_block *sb = old_parent_inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(fid);
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_rename(old_parent_inode, fid, new_parent_inode, new_dentry);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_rename);
+
+/* remove a file (volume lock held across fscore_remove()) */
+s32 fsapi_remove(struct inode *inode, FILE_ID_T *fid)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(fid);
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_remove(inode, fid);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_remove);
+
+/* get the information of a given file (volume lock held) */
+s32 fsapi_read_inode(struct inode *inode, DIR_ENTRY_T *info)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	/* closing ')' added: format string had an unbalanced '(' */
+	TMSG("%s entered (inode %p info %p)\n", __func__, inode, info);
+	err = fscore_read_inode(inode, info);
+	TMSG("%s exited (err:%d)\n", __func__, err);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_read_inode);
+
+/* set the information of a given file (volume lock held) */
+s32 fsapi_write_inode(struct inode *inode, DIR_ENTRY_T *info, int sync)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	/* closing ')' added: format string had an unbalanced '(' */
+	TMSG("%s entered (inode %p info %p sync:%d)\n",
+		__func__, inode, info, sync);
+	err = fscore_write_inode(inode, info, sync);
+	TMSG("%s exited (err:%d)\n", __func__, err);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_write_inode);
+
+/* return the cluster number in the given cluster offset
+ * (volume lock held; *clu is in/out)
+ */
+s32 fsapi_map_clus(struct inode *inode, u32 clu_offset, u32 *clu, int dest)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(clu);
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	/* closing ')' added: format string had an unbalanced '(' */
+	TMSG("%s entered (inode:%p clus:%08x dest:%d)\n",
+		__func__, inode, *clu, dest);
+	err = fscore_map_clus(inode, clu_offset, clu, dest);
+	TMSG("%s exited (clu:%08x err:%d)\n", __func__, *clu, err);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_map_clus);
+
+/* reserve a cluster for later allocation (volume lock held) */
+s32 fsapi_reserve_clus(struct inode *inode)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	TMSG("%s entered (inode:%p)\n", __func__, inode);
+	err = fscore_reserve_clus(inode);
+	TMSG("%s exited (err:%d)\n", __func__, err);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_reserve_clus);
+
+/*----------------------------------------------------------------------*/
+/* Directory Operation Functions */
+/*----------------------------------------------------------------------*/
+
+/* create(make) a directory (volume lock held) */
+s32 fsapi_mkdir(struct inode *inode, u8 *path, FILE_ID_T *fid)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(fid && path);
+
+	/* reject empty path names early */
+	if (unlikely(!strlen(path)))
+		return -EINVAL;
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_mkdir(inode, path, fid);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_mkdir);
+
+/* read a directory entry from the opened directory (volume lock held) */
+s32 fsapi_readdir(struct inode *inode, DIR_ENTRY_T *dir_entry)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(dir_entry);
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_readdir(inode, dir_entry);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_readdir);
+
+/* remove a directory (volume lock held across fscore_rmdir()) */
+s32 fsapi_rmdir(struct inode *inode, FILE_ID_T *fid)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(fid);
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_rmdir(inode, fid);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_rmdir);
+
+/* unlink a file.
+ * that is, remove an entry from a directory. BUT don't truncate
+ * (volume lock held across fscore_unlink())
+ */
+s32 fsapi_unlink(struct inode *inode, FILE_ID_T *fid)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(fid);
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = fscore_unlink(inode, fid);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_unlink);
+
+/* reflect the internal dirty flags to VFS bh dirty flags
+ * (flushes both the FAT cache and the directory-entry cache)
+ */
+s32 fsapi_cache_flush(struct super_block *sb, int do_sync)
+{
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	fcache_flush(sb, do_sync);
+	dcache_flush(sb, do_sync);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return 0;
+}
+EXPORT_SYMBOL(fsapi_cache_flush);
+
+/* release FAT & buf cache
+ * (compiled to a no-op unless CONFIG_SDFAT_DEBUG is set)
+ */
+s32 fsapi_cache_release(struct super_block *sb)
+{
+#ifdef CONFIG_SDFAT_DEBUG
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+
+	fcache_release_all(sb);
+	dcache_release_all(sb);
+
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+#endif /* CONFIG_SDFAT_DEBUG */
+	return 0;
+}
+EXPORT_SYMBOL(fsapi_cache_release);
+
+/* report allocation-unit statistics (see VOL_AU_STAT_*) */
+u32 fsapi_get_au_stat(struct super_block *sb, s32 mode)
+{
+	/* volume lock is not required */
+	return fscore_get_au_stat(sb, mode);
+}
+EXPORT_SYMBOL(fsapi_get_au_stat);
+
+/* clear extent cache */
+void fsapi_invalidate_extent(struct inode *inode)
+{
+	/* Volume lock is not required,
+	 * because it is only called by evict_inode.
+	 * If any other function can call it,
+	 * you should check whether volume lock is needed or not.
+	 */
+	extent_cache_inval_inode(inode);
+}
+EXPORT_SYMBOL(fsapi_invalidate_extent);
+
+/* check device is ejected (backing-device-info validity) */
+s32 fsapi_check_bdi_valid(struct super_block *sb)
+{
+	return fscore_check_bdi_valid(sb);
+}
+EXPORT_SYMBOL(fsapi_check_bdi_valid);
+
+
+
+#ifdef CONFIG_SDFAT_DFR
+/*----------------------------------------------------------------------*/
+/* Defragmentation related */
+/*----------------------------------------------------------------------*/
+/* fetch defragmentation info; arg is a struct defrag_info_arg * */
+s32 fsapi_dfr_get_info(struct super_block *sb, void *arg)
+{
+	/* volume lock is not required */
+	return defrag_get_info(sb, (struct defrag_info_arg *)arg);
+}
+EXPORT_SYMBOL(fsapi_dfr_get_info);
+
+/* scan a directory for defrag candidates; args is a struct defrag_trav_arg * */
+s32 fsapi_dfr_scan_dir(struct super_block *sb, void *args)
+{
+	s32 err;
+
+	/* check the validity of pointer parameters */
+	ASSERT(args);
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = defrag_scan_dir(sb, (struct defrag_trav_arg *)args);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_dfr_scan_dir);
+
+/* validate a defrag chunk's clusters (volume lock held) */
+s32 fsapi_dfr_validate_clus(struct inode *inode, void *chunk, int skip_prev)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = defrag_validate_cluster(inode,
+		(struct defrag_chunk_info *)chunk, skip_prev);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_dfr_validate_clus);
+
+/* reserve nr_clus clusters for defragmentation (volume lock held) */
+s32 fsapi_dfr_reserve_clus(struct super_block *sb, s32 nr_clus)
+{
+	s32 err;
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = defrag_reserve_clusters(sb, nr_clus);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+	return err;
+}
+EXPORT_SYMBOL(fsapi_dfr_reserve_clus);
+
+/* mark the AU containing 'clus' as ignored for allocation */
+s32 fsapi_dfr_mark_ignore(struct super_block *sb, unsigned int clus)
+{
+	/* volume lock is not required */
+	return defrag_mark_ignore(sb, clus);
+}
+EXPORT_SYMBOL(fsapi_dfr_mark_ignore);
+
+/* clear every ignored-AU mark set during defragmentation */
+void fsapi_dfr_unmark_ignore_all(struct super_block *sb)
+{
+	/* volume lock is not required */
+	defrag_unmark_ignore_all(sb);
+}
+EXPORT_SYMBOL(fsapi_dfr_unmark_ignore_all);
+
+/* map a cluster offset during defragmentation (volume lock held) */
+s32 fsapi_dfr_map_clus(struct inode *inode, u32 clu_offset, u32 *clu)
+{
+	s32 err;
+	struct super_block *sb = inode->i_sb;
+
+	/* check the validity of pointer parameters */
+	ASSERT(clu);
+
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	err = defrag_map_cluster(inode, clu_offset, clu);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+
+	return err;
+}
+EXPORT_SYMBOL(fsapi_dfr_map_clus);
+
+/* writeback-completion hook for pages written during defrag */
+void fsapi_dfr_writepage_endio(struct page *page)
+{
+	/* volume lock is not required */
+	defrag_writepage_end_io(page);
+}
+EXPORT_SYMBOL(fsapi_dfr_writepage_endio);
+
+/* apply the pre-move FAT updates for defrag (volume lock held) */
+void fsapi_dfr_update_fat_prev(struct super_block *sb, int force)
+{
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	defrag_update_fat_prev(sb, force);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+}
+EXPORT_SYMBOL(fsapi_dfr_update_fat_prev);
+
+/* apply the post-move FAT updates for defrag (volume lock held) */
+void fsapi_dfr_update_fat_next(struct super_block *sb)
+{
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	defrag_update_fat_next(sb);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+}
+EXPORT_SYMBOL(fsapi_dfr_update_fat_next);
+
+/* issue pending discards generated by defrag (volume lock held) */
+void fsapi_dfr_check_discard(struct super_block *sb)
+{
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	defrag_check_discard(sb);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+}
+EXPORT_SYMBOL(fsapi_dfr_check_discard);
+
+/* free a single cluster on behalf of defrag (volume lock held) */
+void fsapi_dfr_free_clus(struct super_block *sb, u32 clus)
+{
+	mutex_lock(&(SDFAT_SB(sb)->s_vlock));
+	defrag_free_cluster(sb, clus);
+	mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
+}
+EXPORT_SYMBOL(fsapi_dfr_free_clus);
+
+/* report whether defrag is required, with AU counts as out-params */
+s32 fsapi_dfr_check_dfr_required(struct super_block *sb, int *totalau, int *cleanau, int *fullau)
+{
+	/* volume lock is not required */
+	return defrag_check_defrag_required(sb, totalau, cleanau, fullau);
+}
+EXPORT_SYMBOL(fsapi_dfr_check_dfr_required);
+
+/* check (or cancel) defrag activity on an inode byte range */
+s32 fsapi_dfr_check_dfr_on(struct inode *inode, loff_t start, loff_t end, s32 cancel, const char *caller)
+{
+	/* volume lock is not required */
+	return defrag_check_defrag_on(inode, start, end, cancel, caller);
+}
+EXPORT_SYMBOL(fsapi_dfr_check_dfr_on);
+
+
+
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+/* debug hook: simulate sudden power-off during defrag (debug builds only) */
+void fsapi_dfr_spo_test(struct super_block *sb, int flag, const char *caller)
+{
+	/* volume lock is not required */
+	defrag_spo_test(sb, flag, caller);
+}
+EXPORT_SYMBOL(fsapi_dfr_spo_test);
+#endif
+
+
+#endif /* CONFIG_SDFAT_DFR */
+
+/* end of sdfat_api.c */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDFAT_API_H
+#define _SDFAT_API_H
+
+#include "config.h"
+#include "sdfat_fs.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/*----------------------------------------------------------------------*/
+/* Configure Constant & Macro Definitions */
+/*----------------------------------------------------------------------*/
+/* cache size (in number of sectors) */
+/* (should be an exponential value of 2) */
+#define FAT_CACHE_SIZE 128
+#define FAT_CACHE_HASH_SIZE 64
+#define BUF_CACHE_SIZE 256
+#define BUF_CACHE_HASH_SIZE 64
+
+/* Read-ahead related */
+/* First config vars. should be pow of 2 */
+#define FCACHE_MAX_RA_SIZE (PAGE_SIZE)
+#define DCACHE_MAX_RA_SIZE (128*1024)
+
+/*----------------------------------------------------------------------*/
+/* Constant & Macro Definitions */
+/*----------------------------------------------------------------------*/
+/* type values
+ * NOTE(review): grouping appears to mirror exFAT entry-type classes
+ * (critical/benign, primary/secondary) — confirm against core.c users
+ */
+#define TYPE_UNUSED		0x0000
+#define TYPE_DELETED		0x0001
+#define TYPE_INVALID		0x0002
+#define TYPE_CRITICAL_PRI	0x0100
+#define TYPE_BITMAP		0x0101
+#define TYPE_UPCASE		0x0102
+#define TYPE_VOLUME		0x0103
+#define TYPE_DIR		0x0104
+#define TYPE_FILE		0x011F
+#define TYPE_SYMLINK		0x015F
+#define TYPE_CRITICAL_SEC	0x0200
+#define TYPE_STREAM		0x0201
+#define TYPE_EXTEND		0x0202
+#define TYPE_ACL		0x0203
+#define TYPE_BENIGN_PRI		0x0400
+#define TYPE_GUID		0x0401
+#define TYPE_PADDING		0x0402
+#define TYPE_ACLTAB		0x0403
+#define TYPE_BENIGN_SEC		0x0800
+#define TYPE_ALL		0x0FFF
+
+/* eio values (bit flags recording which kind of I/O error occurred) */
+#define SDFAT_EIO_NONE		(0x00000000)
+#define SDFAT_EIO_READ		(0x00000001)
+#define SDFAT_EIO_WRITE		(0x00000002)
+#define SDFAT_EIO_BDI		(0x00000004)
+
+/* modes for volume allocation unit status */
+#define VOL_AU_STAT_TOTAL	(0)
+#define VOL_AU_STAT_CLEAN	(1)
+#define VOL_AU_STAT_FULL	(2)
+
+/*----------------------------------------------------------------------*/
+/* NLS Type Definitions */
+/*----------------------------------------------------------------------*/
+
+/* DOS name structure (8.3 short name) */
+typedef struct {
+	u8 name[DOS_NAME_LENGTH];
+	u8 name_case;	/* case information for base/extension */
+} DOS_NAME_T;
+
+/* unicode name structure (UTF-16 long name) */
+typedef struct {
+	u16 name[MAX_NAME_LENGTH+3];	/* +3 for null and for converting */
+	u16 name_hash;	/* hash for quick name comparison */
+	u8 name_len;
+} UNI_NAME_T;
+
+/*----------------------------------------------------------------------*/
+/* Type Definitions */
+/*----------------------------------------------------------------------*/
+/* should be merged it to DATE_TIME_T */
+typedef union {
+	struct {
+		u8 off : 7;	/* timezone offset field */
+		u8 valid : 1;	/* set when the offset is meaningful */
+	};
+	u8 value;		/* raw byte view of the same bits */
+} TIMEZONE_T;
+
+/* on-disk style timestamp (year is an offset from 1980) */
+typedef struct {
+	u16 sec;	/* 0 ~ 59 */
+	u16 min;	/* 0 ~ 59 */
+	u16 hour;	/* 0 ~ 23 */
+	u16 day;	/* 1 ~ 31 */
+	u16 mon;	/* 1 ~ 12 */
+	u16 year;	/* 0 ~ 127 (since 1980) */
+	TIMEZONE_T tz;
+} TIMESTAMP_T;
+
+/* caller-facing broken-down date/time (absolute year) */
+typedef struct {
+	u16 Year;
+	u16 Month;
+	u16 Day;
+	u16 Hour;
+	u16 Minute;
+	u16 Second;
+	u16 MilliSecond;
+	TIMEZONE_T Timezone;
+} DATE_TIME_T;
+
+/* partition geometry, as reported to callers */
+typedef struct {
+	u64 Offset;	// start sector number of the partition
+	u64 Size;	// in sectors
+} PART_INFO_T;
+
+/* backing block-device geometry */
+typedef struct {
+	u32 SecSize;	// sector size in bytes
+	u64 DevSize;	// block device size in sectors
+} DEV_INFO_T;
+
+/* volume summary returned by fsapi_statfs() */
+typedef struct {
+	u32 FatType;
+	u32 ClusterSize;
+	u32 NumClusters;
+	u32 FreeClusters;
+	u32 UsedClusters;
+} VOL_INFO_T;
+
+/* directory structure: a cluster chain (start cluster + length) */
+typedef struct {
+	u32 dir;	/* first cluster of the chain */
+	u32 size;	/* chain length in clusters */
+	u8 flags;	/* chain flags (e.g. contiguity) — see core users */
+} CHAIN_T;
+
+/* hint structure: remembers a cluster and its position for fast re-walk */
+typedef struct {
+	u32 clu;
+	union {
+		u32 off;	// cluster offset
+		s32 eidx;	// entry index
+	};
+} HINT_T;
+
+/* per-inode extent-cache state (LRU of cached extents) */
+typedef struct {
+	spinlock_t cache_lru_lock;
+	struct list_head cache_lru;
+	s32 nr_caches;
+	u32 cache_valid_id;	// for avoiding the race between alloc and free
+} EXTENT_T;
+
+/* first empty entry hint information */
+typedef struct {
+	s32 eidx;	// entry index of a directory
+	s32 count;	// count of continuous empty entry
+	CHAIN_T cur;	// the cluster that first empty slot exists in
+} HINT_FEMP_T;
+
+/* file id structure: core-layer handle for an open file/dir */
+typedef struct {
+	CHAIN_T dir;		/* directory chain containing the entry */
+	s32 entry;		/* dentry index within that directory */
+	u32 type;
+	u32 attr;
+	u32 start_clu;
+	u64 size;
+	u8 flags;
+	u8 reserved[3];		// padding
+	u32 version;		// the copy of low 32bit of i_version to check the validation of hint_stat
+	s64 rwoffset;		// file offset or dentry index for readdir
+	EXTENT_T extent;	// extent cache for a file
+	HINT_T hint_bmap;	// hint for cluster last accessed
+	HINT_T hint_stat;	// hint for entry index we try to lookup next time
+	HINT_FEMP_T hint_femp;	// hint for first empty entry
+} FILE_ID_T;
+
+/* buffers used to return both long and short names of a dentry */
+typedef struct {
+	s8 *lfn;
+	s8 *sfn;
+	s32 lfnbuf_len;	//usually MAX_UNINAME_BUF_SIZE
+	s32 sfnbuf_len;	//usually MAX_DOSNAME_BUF_SIZE, used only for vfat, not for exfat
+} DENTRY_NAMEBUF_T;
+
+/* directory-entry attributes handed back to the VFS glue */
+typedef struct {
+	u32 Attr;
+	u64 Size;
+	u32 NumSubdirs;
+	DATE_TIME_T CreateTimestamp;
+	DATE_TIME_T ModifyTimestamp;
+	DATE_TIME_T AccessTimestamp;
+	DENTRY_NAMEBUF_T NameBuf;
+} DIR_ENTRY_T;
+
+/* cache information: one cached sector, linked on both an
+ * LRU list and a hash chain */
+typedef struct __cache_entry {
+	struct __cache_entry *next;
+	struct __cache_entry *prev;
+	struct {
+		struct __cache_entry *next;
+		struct __cache_entry *prev;
+	} hash;
+	u64 sec;	/* sector number this entry caches */
+	u32 flag;
+	struct buffer_head *bh;	/* backing buffer head */
+} cache_ent_t;
+
+/*----------------------------------------------------------------------*/
+/* Type Definitions : Wrapper & In-Core */
+/*----------------------------------------------------------------------*/
+/* FAT-entry accessors; one implementation per FAT variant. */
+typedef struct __FATENT_OPS_T {
+	s32 (*ent_get)(struct super_block *sb, u32 loc, u32 *content);	// read FAT[loc]
+	s32 (*ent_set)(struct super_block *sb, u32 loc, u32 content);	// write FAT[loc]
+} FATENT_OPS_T;
+
+/* Per-variant (vfat/exfat) operation table; installed at mount time. */
+typedef struct {
+	s32 (*alloc_cluster)(struct super_block *, u32, CHAIN_T *, s32);
+	s32 (*free_cluster)(struct super_block *, CHAIN_T *, s32);
+	s32 (*count_used_clusters)(struct super_block *, u32 *);
+	s32 (*init_dir_entry)(struct super_block *, CHAIN_T *, s32, u32, u32, u64);
+	s32 (*init_ext_entry)(struct super_block *, CHAIN_T *, s32, s32, UNI_NAME_T *, DOS_NAME_T *);
+	s32 (*find_dir_entry)(struct super_block *, FILE_ID_T *, CHAIN_T *, UNI_NAME_T *, s32, DOS_NAME_T *, u32);
+	s32 (*delete_dir_entry)(struct super_block *, CHAIN_T *, s32, s32, s32);
+	void (*get_uniname_from_ext_entry)(struct super_block *, CHAIN_T *, s32, u16 *);
+	s32 (*count_ext_entries)(struct super_block *, CHAIN_T *, s32, DENTRY_T *);
+	s32 (*calc_num_entries)(UNI_NAME_T *);
+	s32 (*check_max_dentries)(FILE_ID_T *);
+	/* raw dentry field accessors */
+	u32 (*get_entry_type)(DENTRY_T *);
+	void (*set_entry_type)(DENTRY_T *, u32);
+	u32 (*get_entry_attr)(DENTRY_T *);
+	void (*set_entry_attr)(DENTRY_T *, u32);
+	u8 (*get_entry_flag)(DENTRY_T *);
+	void (*set_entry_flag)(DENTRY_T *, u8);
+	u32 (*get_entry_clu0)(DENTRY_T *);
+	void (*set_entry_clu0)(DENTRY_T *, u32);
+	u64 (*get_entry_size)(DENTRY_T *);
+	void (*set_entry_size)(DENTRY_T *, u64);
+	void (*get_entry_time)(DENTRY_T *, TIMESTAMP_T *, u8);
+	void (*set_entry_time)(DENTRY_T *, TIMESTAMP_T *, u8);
+	u32 (*get_au_stat)(struct super_block *, s32);
+} FS_FUNC_T;
+
+/* Per-volume in-core filesystem state (reached via SDFAT_SB(sb)->fsi). */
+typedef struct __FS_INFO_T {
+	s32 bd_opened;		// opened or not
+	u32 vol_type;		// volume FAT type
+	u32 vol_id;		// volume serial number
+	u64 num_sectors;	// num of sectors in volume
+	u32 num_clusters;	// num of clusters in volume
+	u32 cluster_size;	// cluster size in bytes
+	u32 cluster_size_bits;
+	u32 sect_per_clus;	// cluster size in sectors
+	u32 sect_per_clus_bits;
+	u64 FAT1_start_sector;	// FAT1 start sector
+	u64 FAT2_start_sector;	// FAT2 start sector
+	u64 root_start_sector;	// root dir start sector
+	u64 data_start_sector;	// data area start sector
+	u32 num_FAT_sectors;	// num of FAT sectors
+	u32 root_dir;		// root dir cluster
+	u32 dentries_in_root;	// num of dentries in root dir
+	u32 dentries_per_clu;	// num of dentries per cluster
+	u32 vol_flag;		// volume dirty flag
+	struct buffer_head *pbr_bh;	// buffer_head of PBR sector
+
+	u32 map_clu;		// allocation bitmap start cluster
+	u32 map_sectors;	// num of allocation bitmap sectors
+	struct buffer_head **vol_amap;	// allocation bitmap
+
+	u16 **vol_utbl;		// upcase table
+
+	u32 clu_srch_ptr;	// cluster search pointer
+	u32 used_clusters;	// number of used clusters
+
+	u32 prev_eio;		// block device operation error flag
+				// (SDFAT_EIO_* bits latch once per mount to
+				//  avoid repeating warnings; see blkdev.c)
+
+	FS_FUNC_T *fs_func;
+	FATENT_OPS_T *fatent_ops;
+
+	s32 reserved_clusters;	// # of reserved clusters (DA)
+	void *amap;		// AU Allocation Map
+
+	/* fat cache */
+	struct {
+		cache_ent_t pool[FAT_CACHE_SIZE];
+		cache_ent_t lru_list;
+		cache_ent_t hash_list[FAT_CACHE_HASH_SIZE];
+	} fcache;
+
+	/* meta cache */
+	struct {
+		cache_ent_t pool[BUF_CACHE_SIZE];
+		cache_ent_t lru_list;
+		cache_ent_t keep_list;	// CACHEs in this list will not be kicked by normal lru operations
+		cache_ent_t hash_list[BUF_CACHE_HASH_SIZE];
+	} dcache;
+} FS_INFO_T;
+
+/*======================================================================*/
+/*                                                                      */
+/*                     API FUNCTION DECLARATIONS                        */
+/*                  (CHANGE THIS PART IF REQUIRED)                      */
+/*                                                                      */
+/*======================================================================*/
+
+/*----------------------------------------------------------------------*/
+/*  External Function Declarations                                      */
+/*----------------------------------------------------------------------*/
+
+/* NOTE(review): fsapi_* appear to return 0 on success and a negative
+ * errno on failure (see blkdev.c/cache.c callees) -- confirm in api.c. */
+
+/* file system initialization & shutdown functions */
+s32 fsapi_init(void);
+s32 fsapi_shutdown(void);
+
+/* volume management functions */
+s32 fsapi_mount(struct super_block *sb);
+s32 fsapi_umount(struct super_block *sb);
+s32 fsapi_statfs(struct super_block *sb, VOL_INFO_T *info);
+s32 fsapi_sync_fs(struct super_block *sb, s32 do_sync);
+s32 fsapi_set_vol_flags(struct super_block *sb, u16 new_flag, s32 always_sync);
+
+/* file management functions */
+s32 fsapi_lookup(struct inode *inode, u8 *path, FILE_ID_T *fid);
+s32 fsapi_create(struct inode *inode, u8 *path, u8 mode, FILE_ID_T *fid);
+s32 fsapi_read_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count, u64 *rcount);
+s32 fsapi_write_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count, u64 *wcount);
+s32 fsapi_remove(struct inode *inode, FILE_ID_T *fid); /* unlink and truncate */
+s32 fsapi_truncate(struct inode *inode, u64 old_size, u64 new_size);
+s32 fsapi_rename(struct inode *old_parent_inode, FILE_ID_T *fid,
+		struct inode *new_parent_inode, struct dentry *new_dentry);
+s32 fsapi_unlink(struct inode *inode, FILE_ID_T *fid);
+s32 fsapi_read_inode(struct inode *inode, DIR_ENTRY_T *info);
+s32 fsapi_write_inode(struct inode *inode, DIR_ENTRY_T *info, int sync);
+s32 fsapi_map_clus(struct inode *inode, u32 clu_offset, u32 *clu, int dest);
+s32 fsapi_reserve_clus(struct inode *inode);
+
+/* directory management functions */
+s32 fsapi_mkdir(struct inode *inode, u8 *path, FILE_ID_T *fid);
+s32 fsapi_readdir(struct inode *inode, DIR_ENTRY_T *dir_entry);
+s32 fsapi_rmdir(struct inode *inode, FILE_ID_T *fid);
+
+/* FAT & buf cache functions */
+s32 fsapi_cache_flush(struct super_block *sb, int do_sync);
+s32 fsapi_cache_release(struct super_block *sb);
+
+/* extra info functions */
+u32 fsapi_get_au_stat(struct super_block *sb, s32 mode);
+
+/* extent cache functions */
+void fsapi_invalidate_extent(struct inode *inode);
+
+/* bdev management */
+s32 fsapi_check_bdi_valid(struct super_block *sb);
+
+#ifdef CONFIG_SDFAT_DFR
+/*----------------------------------------------------------------------*/
+/*  Defragmentation related                                             */
+/*----------------------------------------------------------------------*/
+
+s32 fsapi_dfr_get_info(struct super_block *sb, void *arg);
+
+s32 fsapi_dfr_scan_dir(struct super_block *sb, void *args);
+
+s32 fsapi_dfr_validate_clus(struct inode *inode, void *chunk, int skip_prev);
+s32 fsapi_dfr_reserve_clus(struct super_block *sb, s32 nr_clus);
+s32 fsapi_dfr_mark_ignore(struct super_block *sb, unsigned int clus);
+void fsapi_dfr_unmark_ignore_all(struct super_block *sb);
+
+s32 fsapi_dfr_map_clus(struct inode *inode, u32 clu_offset, u32 *clu);
+void fsapi_dfr_writepage_endio(struct page *page);
+
+void fsapi_dfr_update_fat_prev(struct super_block *sb, int force);
+void fsapi_dfr_update_fat_next(struct super_block *sb);
+void fsapi_dfr_check_discard(struct super_block *sb);
+void fsapi_dfr_free_clus(struct super_block *sb, u32 clus);
+
+s32 fsapi_dfr_check_dfr_required(struct super_block *sb, int *totalau, int *cleanau, int *fullau);
+s32 fsapi_dfr_check_dfr_on(struct inode *inode, loff_t start, loff_t end, s32 cancel, const char *caller);
+
+
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+void fsapi_dfr_spo_test(struct super_block *sb, int flag, const char *caller);
+#endif	/* CONFIG_SDFAT_DFR_DEBUG */
+
+#endif	/* CONFIG_SDFAT_DFR */
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _SDFAT_API_H */
+
+/* end of api.h */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : blkdev.c */
+/* PURPOSE : sdFAT Block Device Driver Glue Layer */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/************************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/log2.h>
+#include <linux/backing-dev.h>
+
+#include "sdfat.h"
+
+/*----------------------------------------------------------------------*/
+/* Constant & Macro Definitions */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* Global Variable Definitions */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* Local Variable Definitions */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY */
+/************************************************************************/
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
+	/* EMPTY : the kernel provides inode_to_bdi() since v4.0 */
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) */
+/* Compat shim for kernels < 4.0: read the backing_dev_info straight
+ * from the inode's address_space. */
+static struct backing_dev_info *inode_to_bdi(struct inode *bd_inode)
+{
+	return bd_inode->i_mapping->backing_dev_info;
+}
+#endif
+
+/*======================================================================*/
+/* Function Definitions */
+/*======================================================================*/
+/* Mark the block device as opened for this volume; idempotent, and
+ * always succeeds (the actual device open is handled by the VFS). */
+s32 bdev_open_dev(struct super_block *sb)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+	if (!fsi->bd_opened)
+		fsi->bd_opened = true;
+
+	return 0;
+}
+
+/* Mark the block device as closed; all bdev_* I/O helpers will refuse
+ * to run afterwards. Always succeeds. */
+s32 bdev_close_dev(struct super_block *sb)
+{
+	SDFAT_SB(sb)->fsi.bd_opened = false;
+	return 0;
+}
+
+/* Returns nonzero when the backing device has gone away, i.e. its
+ * backing_dev_info no longer has a device attached. */
+static inline s32 block_device_ejected(struct super_block *sb)
+{
+	struct inode *bd_inode = sb->s_bdev->bd_inode;
+	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
+
+	return (bdi->dev == NULL);
+}
+
+/* Verify the backing device is still present.
+ * Returns 0 when valid, -ENXIO when ejected. The error is logged only
+ * once per volume: SDFAT_EIO_BDI latches in fsi->prev_eio. */
+s32 bdev_check_bdi_valid(struct super_block *sb)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+	if (block_device_ejected(sb)) {
+		if (!(fsi->prev_eio & SDFAT_EIO_BDI)) {
+			fsi->prev_eio |= SDFAT_EIO_BDI;
+			sdfat_log_msg(sb, KERN_ERR, "%s: block device is "
+				"eliminated.(bdi:%p)", __func__, sb->s_bdi);
+		}
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+#if IS_BUILTIN(CONFIG_SDFAT_FS)
+/* Built-in variant: batch the readahead requests under a blk_plug and
+ * flush once per page worth of sectors to bound queued work. */
+static void __bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs)
+{
+	u32 sects_per_page = (PAGE_SIZE >> sb->s_blocksize_bits);
+	struct blk_plug plug;
+	u64 i;
+
+	blk_start_plug(&plug);
+	for (i = 0; i < num_secs; i++) {
+		/* flush at every page boundary (but not before the first) */
+		if (i && !(i & (sects_per_page - 1)))
+			blk_flush_plug(current);
+		sb_breadahead(sb, (sector_t)(secno + i));
+	}
+	blk_finish_plug(&plug);
+}
+#else
+/* Module variant: plain per-sector readahead without plugging
+ * (NOTE(review): presumably because blk_flush_plug is unavailable to
+ * modules -- confirm against the target kernel's exports). */
+static void __bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs)
+{
+	u64 i;
+
+	for (i = 0; i < num_secs; i++)
+		sb_breadahead(sb, (sector_t)(secno + i));
+}
+#endif
+
+/* Make a readahead request for num_secs sectors starting at secno.
+ * Returns 0, or -EIO if the device has not been opened. */
+s32 bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+	if (!fsi->bd_opened)
+		return -EIO;
+
+	__bdev_readahead(sb, secno, num_secs);
+
+	return 0;
+}
+
+/* Read (or just map, when read == 0) num_secs sectors at secno into *bh.
+ * Any previously held *bh is released first; on failure *bh is the
+ * result of __bread/__getblk (NULL) and -EIO is returned. The "no bh"
+ * warning is logged only once per volume via the SDFAT_EIO_READ latch. */
+s32 bdev_mread(struct super_block *sb, u64 secno, struct buffer_head **bh, u64 num_secs, s32 read)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	u8 blksize_bits = sb->s_blocksize_bits;
+#ifdef CONFIG_SDFAT_DBG_IOCTL
+	/* debug hook: force an I/O error when requested via ioctl */
+	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+	long flags = sbi->debug_flags;
+
+	if (flags & SDFAT_DEBUGFLAGS_ERROR_RW)
+		return -EIO;
+#endif /* CONFIG_SDFAT_DBG_IOCTL */
+
+	if (!fsi->bd_opened)
+		return -EIO;
+
+	/* drop the caller's previous buffer before re-reading */
+	brelse(*bh);
+
+	if (read)
+		*bh = __bread(sb->s_bdev, (sector_t)secno, num_secs << blksize_bits);
+	else
+		*bh = __getblk(sb->s_bdev, (sector_t)secno, num_secs << blksize_bits);
+
+	/* read successfully */
+	if (*bh)
+		return 0;
+
+	/*
+	 * patch 1.2.4 : reset ONCE warning message per volume.
+	 */
+	if (!(fsi->prev_eio & SDFAT_EIO_READ)) {
+		fsi->prev_eio |= SDFAT_EIO_READ;
+		sdfat_log_msg(sb, KERN_ERR, "%s: No bh. I/O error.", __func__);
+		sdfat_debug_warn_on(1);
+	}
+
+	return -EIO;
+}
+
+/* Write num_secs sectors from bh to secno.
+ * Fast path: bh already maps secno -> mark it dirty (and sync if asked).
+ * Slow path: bh holds data for a different block -> copy its contents
+ * into a fresh buffer_head for secno and dirty that one instead.
+ * Returns 0 on success, -EIO on failure (logged once per volume). */
+s32 bdev_mwrite(struct super_block *sb, u64 secno, struct buffer_head *bh, u64 num_secs, s32 sync)
+{
+	u64 count;
+	struct buffer_head *bh2;
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+#ifdef CONFIG_SDFAT_DBG_IOCTL
+	/* debug hook: force an I/O error when requested via ioctl */
+	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+	long flags = sbi->debug_flags;
+
+	if (flags & SDFAT_DEBUGFLAGS_ERROR_RW)
+		return -EIO;
+#endif /* CONFIG_SDFAT_DBG_IOCTL */
+
+	if (!fsi->bd_opened)
+		return -EIO;
+
+	if (secno == bh->b_blocknr) {
+		/* bh already maps the target block */
+		set_buffer_uptodate(bh);
+		mark_buffer_dirty(bh);
+		if (sync && (sync_dirty_buffer(bh) != 0))
+			return -EIO;
+	} else {
+		/* copy into a buffer_head that maps the target block */
+		count = num_secs << sb->s_blocksize_bits;
+
+		bh2 = __getblk(sb->s_bdev, (sector_t)secno, count);
+
+		if (!bh2)
+			goto no_bh;
+
+		lock_buffer(bh2);
+		memcpy(bh2->b_data, bh->b_data, count);
+		set_buffer_uptodate(bh2);
+		mark_buffer_dirty(bh2);
+		unlock_buffer(bh2);
+		if (sync && (sync_dirty_buffer(bh2) != 0)) {
+			__brelse(bh2);
+			goto no_bh;
+		}
+		__brelse(bh2);
+	}
+	return 0;
+no_bh:
+	/*
+	 * patch 1.2.4 : reset ONCE warning message per volume.
+	 */
+	if (!(fsi->prev_eio & SDFAT_EIO_WRITE)) {
+		fsi->prev_eio |= SDFAT_EIO_WRITE;
+		sdfat_log_msg(sb, KERN_ERR, "%s: No bh. I/O error.", __func__);
+		sdfat_debug_warn_on(1);
+	}
+
+	return -EIO;
+}
+
+/* Flush all dirty buffers of the volume's block device to storage.
+ * Returns sync_blockdev()'s result, or -EIO if the device is closed. */
+s32 bdev_sync_all(struct super_block *sb)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+#ifdef CONFIG_SDFAT_DBG_IOCTL
+	/* debug hook: force an I/O error when requested via ioctl */
+	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+	long flags = sbi->debug_flags;
+
+	if (flags & SDFAT_DEBUGFLAGS_ERROR_RW)
+		return -EIO;
+#endif /* CONFIG_SDFAT_DBG_IOCTL */
+
+	if (!fsi->bd_opened)
+		return -EIO;
+
+	return sync_blockdev(sb->s_bdev);
+}
+
+/*
+ * Sector Read/Write Functions
+ * Each helper range-checks the request against the volume size (when
+ * known, i.e. num_sectors > 0) and reports failures via the ratelimited
+ * fs_error path before returning -EIO.
+ */
+s32 read_sect(struct super_block *sb, u64 sec, struct buffer_head **bh, s32 read)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+	BUG_ON(!bh);
+
+	if ((fsi->num_sectors > 0) && (sec >= fsi->num_sectors)) {
+		sdfat_fs_error_ratelimit(sb,
+			"%s: out of range (sect:%llu)", __func__, sec);
+		return -EIO;
+	}
+
+	if (bdev_mread(sb, sec, bh, 1, read)) {
+		sdfat_fs_error_ratelimit(sb,
+			"%s: I/O error (sect:%llu)", __func__, sec);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+s32 write_sect(struct super_block *sb, u64 sec, struct buffer_head *bh, s32 sync)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+	BUG_ON(!bh);
+
+	if ((fsi->num_sectors > 0) && (sec >= fsi->num_sectors)) {
+		sdfat_fs_error_ratelimit(sb,
+			"%s: out of range (sect:%llu)", __func__, sec);
+		return -EIO;
+	}
+
+	if (bdev_mwrite(sb, sec, bh, 1, sync)) {
+		sdfat_fs_error_ratelimit(sb, "%s: I/O error (sect:%llu)",
+			__func__, sec);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+s32 read_msect(struct super_block *sb, u64 sec, struct buffer_head **bh, u64 num_secs, s32 read)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	u64 end = sec + num_secs;
+
+	BUG_ON(!bh);
+
+	if ((fsi->num_sectors > 0) && (end > fsi->num_sectors)) {
+		sdfat_fs_error_ratelimit(sb, "%s: out of range(sect:%llu len:%llu)",
+			__func__, sec, num_secs);
+		return -EIO;
+	}
+
+	if (bdev_mread(sb, sec, bh, num_secs, read)) {
+		sdfat_fs_error_ratelimit(sb, "%s: I/O error (sect:%llu len:%llu)",
+			__func__, sec, num_secs);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+s32 write_msect(struct super_block *sb, u64 sec, struct buffer_head *bh, u64 num_secs, s32 sync)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	u64 end = sec + num_secs;
+
+	BUG_ON(!bh);
+
+	if ((fsi->num_sectors > 0) && (end > fsi->num_sectors)) {
+		sdfat_fs_error_ratelimit(sb, "%s: out of range(sect:%llu len:%llu)",
+			__func__, sec, num_secs);
+		return -EIO;
+	}
+
+	if (bdev_mwrite(sb, sec, bh, num_secs, sync)) {
+		sdfat_fs_error_ratelimit(sb, "%s: I/O error (sect:%llu len:%llu)",
+			__func__, sec, num_secs);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Submit every dirty buffer in bhs[0..nr_bhs) for writeback. */
+static inline void __blkdev_write_bhs(struct buffer_head **bhs, s32 nr_bhs)
+{
+	s32 idx = 0;
+
+	while (idx < nr_bhs)
+		write_dirty_buffer(bhs[idx++], WRITE);
+}
+
+/* Wait for bhs[0..nr_bhs) to complete; returns -EIO if any buffer
+ * failed to become uptodate (first failure wins). */
+static inline s32 __blkdev_sync_bhs(struct buffer_head **bhs, s32 nr_bhs)
+{
+	s32 err = 0;
+	s32 idx;
+
+	for (idx = 0; idx < nr_bhs; idx++) {
+		wait_on_buffer(bhs[idx]);
+		if (err)
+			continue;
+		if (!buffer_uptodate(bhs[idx]))
+			err = -EIO;
+	}
+
+	return err;
+}
+
+/* Zero-fill num_secs blocks starting at blknr.
+ * Buffers are dirtied in batches of MAX_BUF_PER_PAGE: each full batch
+ * is submitted and released, then the final batch is submitted and
+ * waited on. On error the still-held buffers are discarded with
+ * bforget() so the zeroes never reach disk half-written.
+ * Returns 0 on success, -ENOMEM/-EIO on failure. */
+static inline s32 __buffer_zeroed(struct super_block *sb, u64 blknr, u64 num_secs)
+{
+	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
+	s32 nr_bhs = MAX_BUF_PER_PAGE;
+	u64 last_blknr = blknr + num_secs;
+	s32 err, i, n;
+	struct blk_plug plug;
+
+	/* Zeroing the unused blocks on this cluster */
+	n = 0;
+	blk_start_plug(&plug);
+	while (blknr < last_blknr) {
+		bhs[n] = sb_getblk(sb, (sector_t)blknr);
+		if (!bhs[n]) {
+			err = -ENOMEM;
+			blk_finish_plug(&plug);
+			goto error;
+		}
+		memset(bhs[n]->b_data, 0, sb->s_blocksize);
+		set_buffer_uptodate(bhs[n]);
+		mark_buffer_dirty(bhs[n]);
+
+		n++;
+		blknr++;
+
+		if (blknr == last_blknr)
+			break;
+
+		if (n == nr_bhs) {
+			/* batch full: submit and release before refilling */
+			__blkdev_write_bhs(bhs, n);
+
+			for (i = 0; i < n; i++)
+				brelse(bhs[i]);
+			n = 0;
+		}
+	}
+	/* submit the final (possibly partial) batch and wait for it */
+	__blkdev_write_bhs(bhs, n);
+	blk_finish_plug(&plug);
+
+	err = __blkdev_sync_bhs(bhs, n);
+	if (err)
+		goto error;
+
+	for (i = 0; i < n; i++)
+		brelse(bhs[i]);
+
+	return 0;
+
+error:
+	/* NOTE(review): blknr has advanced past the failing block here, so
+	 * the logged sector is approximate. */
+	EMSG("%s: failed zeroed sect %llu\n", __func__, blknr);
+	for (i = 0; i < n; i++)
+		bforget(bhs[i]);
+
+	return err;
+}
+
+/* Zero num_secs sectors starting at sec.
+ * Returns 0 on success, -EIO when the range exceeds the volume, and
+ * -EAGAIN when the zeroing itself fails. */
+s32 write_msect_zero(struct super_block *sb, u64 sec, u64 num_secs)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	u64 end = sec + num_secs;
+
+	if ((fsi->num_sectors > 0) && (end > fsi->num_sectors)) {
+		sdfat_fs_error_ratelimit(sb, "%s: out of range(sect:%llu len:%llu)",
+			__func__, sec, num_secs);
+		return -EIO;
+	}
+
+	/* Just return -EAGAIN if it is failed */
+	return __buffer_zeroed(sb, sec, num_secs) ? -EAGAIN : 0;
+} /* end of write_msect_zero */
+
+/* end of blkdev.c */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : cache.c */
+/* PURPOSE : sdFAT Cache Manager */
+/* (FAT Cache & Buffer Cache) */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <linux/swap.h> /* for mark_page_accessed() */
+#include <asm/unaligned.h>
+
+#include "sdfat.h"
+#include "core.h"
+
+#define DEBUG_HASH_LIST
+#define DEBUG_HASH_PREV (0xAAAA5555)
+#define DEBUG_HASH_NEXT (0x5555AAAA)
+
+/*----------------------------------------------------------------------*/
+/* Global Variable Definitions */
+/*----------------------------------------------------------------------*/
+/* All buffer structures are protected w/ fsi->v_sem */
+
+/*----------------------------------------------------------------------*/
+/* Local Variable Definitions */
+/*----------------------------------------------------------------------*/
+#define LOCKBIT (0x01)
+#define DIRTYBIT (0x02)
+#define KEEPBIT (0x04)
+
+/*----------------------------------------------------------------------*/
+/* Cache handling function declarations */
+/*----------------------------------------------------------------------*/
+static cache_ent_t *__fcache_find(struct super_block *sb, u64 sec);
+static cache_ent_t *__fcache_get(struct super_block *sb);
+static void __fcache_insert_hash(struct super_block *sb, cache_ent_t *bp);
+static void __fcache_remove_hash(cache_ent_t *bp);
+
+static cache_ent_t *__dcache_find(struct super_block *sb, u64 sec);
+static cache_ent_t *__dcache_get(struct super_block *sb);
+static void __dcache_insert_hash(struct super_block *sb, cache_ent_t *bp);
+static void __dcache_remove_hash(cache_ent_t *bp);
+
+/*----------------------------------------------------------------------*/
+/* Static functions */
+/*----------------------------------------------------------------------*/
+/* Insert bp immediately after the list head (MRU position). */
+static void push_to_mru(cache_ent_t *bp, cache_ent_t *list)
+{
+	cache_ent_t *first = list->next;
+
+	bp->prev = list;
+	bp->next = first;
+	first->prev = bp;
+	list->next = bp;
+}
+
+/* Insert bp immediately before the list head (LRU position). */
+static void push_to_lru(cache_ent_t *bp, cache_ent_t *list)
+{
+	cache_ent_t *last = list->prev;
+
+	bp->next = list;
+	bp->prev = last;
+	last->next = bp;
+	list->prev = bp;
+}
+
+/* Unlink bp from wherever it is and re-insert it at the MRU end. */
+static void move_to_mru(cache_ent_t *bp, cache_ent_t *list)
+{
+	cache_ent_t *p = bp->prev;
+	cache_ent_t *n = bp->next;
+
+	p->next = n;
+	n->prev = p;
+	push_to_mru(bp, list);
+}
+
+/* Unlink bp from wherever it is and re-insert it at the LRU end. */
+static void move_to_lru(cache_ent_t *bp, cache_ent_t *list)
+{
+	cache_ent_t *p = bp->prev;
+	cache_ent_t *n = bp->next;
+
+	p->next = n;
+	n->prev = p;
+	push_to_lru(bp, list);
+}
+
+/* Returns 0 when bp is linked into a hash chain, -EINVAL when it is
+ * self-linked or carries the debug poison values (already removed). */
+static inline s32 __check_hash_valid(cache_ent_t *bp)
+{
+#ifdef DEBUG_HASH_LIST
+	if ((bp->hash.next == (cache_ent_t *)DEBUG_HASH_NEXT) ||
+		(bp->hash.prev == (cache_ent_t *)DEBUG_HASH_PREV)) {
+		return -EINVAL;
+	}
+#endif
+	if ((bp->hash.next == bp) || (bp->hash.prev == bp))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Unlink bp from its hash chain; the self-links (or, in debug builds,
+ * the poison values that overwrite them) let __check_hash_valid()
+ * detect a double remove later. */
+static inline void __remove_from_hash(cache_ent_t *bp)
+{
+	(bp->hash.prev)->hash.next = bp->hash.next;
+	(bp->hash.next)->hash.prev = bp->hash.prev;
+	bp->hash.next = bp;
+	bp->hash.prev = bp;
+#ifdef DEBUG_HASH_LIST
+	bp->hash.next = (cache_ent_t *)DEBUG_HASH_NEXT;
+	bp->hash.prev = (cache_ent_t *)DEBUG_HASH_PREV;
+#endif
+}
+
+/* Do FAT mirroring (don't sync)
+ * sec: sector No. in FAT1
+ * bh: bh of sec.
+ * When FAT2 exists (its start differs from FAT1's), replicate the FAT1
+ * sector into the corresponding FAT2 sector. No-op unless
+ * CONFIG_SDFAT_FAT_MIRRORING is enabled.
+ */
+static inline s32 __fat_copy(struct super_block *sb, u64 sec, struct buffer_head *bh, int sync)
+{
+#ifdef CONFIG_SDFAT_FAT_MIRRORING
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	u64 sec2;
+
+	if (fsi->FAT2_start_sector != fsi->FAT1_start_sector) {
+		sec2 = sec - fsi->FAT1_start_sector + fsi->FAT2_start_sector;
+		BUG_ON(sec2 != (sec + (u64)fsi->num_FAT_sectors));
+
+		MMSG("BD: fat mirroring (%llu in FAT1, %llu in FAT2)\n", sec, sec2);
+		if (write_sect(sb, sec2, bh, sync))
+			return -EIO;
+	}
+#else
+	/* DO NOTHING */
+#endif
+	return 0;
+} /* end of __fat_copy */
+
+/*
+ * returns 1, if bp is flushed
+ * returns 0, if bp is not dirty
+ * returns -EIO, if error occurs
+ */
+static s32 __fcache_ent_flush(struct super_block *sb, cache_ent_t *bp, u32 sync)
+{
+	if (!(bp->flag & DIRTYBIT))
+		return 0;
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+	// Make buffer dirty (XXX: Naive impl.)
+	if (write_sect(sb, bp->sec, bp->bh, 0))
+		return -EIO;
+
+	if (__fat_copy(sb, bp->sec, bp->bh, 0))
+		return -EIO;
+#endif
+	bp->flag &= ~(DIRTYBIT);
+
+	if (sync)
+		sync_dirty_buffer(bp->bh);
+
+	return 1;
+}
+
+/* Reset a FAT cache slot: drop it from the hash, release its bh, and
+ * park it at the LRU end for immediate reuse. Always returns 0. */
+static s32 __fcache_ent_discard(struct super_block *sb, cache_ent_t *bp)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+	__fcache_remove_hash(bp);
+	bp->sec = ~0;	/* ~0 marks the slot as unused */
+	bp->flag = 0;
+
+	if (bp->bh) {
+		__brelse(bp->bh);
+		bp->bh = NULL;
+	}
+	move_to_lru(bp, &fsi->fcache.lru_list);
+	return 0;
+}
+
+/* Return a pointer to the cached data of FAT sector 'sec', reading it
+ * from disk on a cache miss. Returns NULL on I/O error or when the
+ * backing device has been ejected (the stale entry is then discarded). */
+u8 *fcache_getblk(struct super_block *sb, u64 sec)
+{
+	cache_ent_t *bp;
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	u32 page_ra_count = FCACHE_MAX_RA_SIZE >> sb->s_blocksize_bits;
+
+	bp = __fcache_find(sb, sec);
+	if (bp) {
+		if (bdev_check_bdi_valid(sb)) {
+			/* device gone: flush best-effort, then invalidate */
+			__fcache_ent_flush(sb, bp, 0);
+			__fcache_ent_discard(sb, bp);
+			return NULL;
+		}
+		move_to_mru(bp, &fsi->fcache.lru_list);
+		return bp->bh->b_data;
+	}
+
+	/* miss: claim a slot (evicting its old hash linkage if any) */
+	bp = __fcache_get(sb);
+	if (!__check_hash_valid(bp))
+		__fcache_remove_hash(bp);
+
+	bp->sec = sec;
+	bp->flag = 0;
+	__fcache_insert_hash(sb, bp);
+
+	/* Naive FAT read-ahead (increase I/O unit to page_ra_count) */
+	if ((sec & (page_ra_count - 1)) == 0)
+		bdev_readahead(sb, sec, (u64)page_ra_count);
+
+	/*
+	 * patch 1.2.4 : buffer_head null pointer exception problem.
+	 *
+	 * When read_sect is failed, fcache should be moved to
+	 * EMPTY hash_list and the first of lru_list.
+	 */
+	if (read_sect(sb, sec, &(bp->bh), 1)) {
+		__fcache_ent_discard(sb, bp);
+		return NULL;
+	}
+
+	return bp->bh->b_data;
+}
+
+/* Mark bp dirty for delayed writeback (flush-thread model).
+ * Returns 0 when the delayed-dirty path applies; -ENOTSUPP when the
+ * feature is compiled out or the volume is exfat, in which case the
+ * caller must write through immediately. */
+static inline int __mark_delayed_dirty(struct super_block *sb, cache_ent_t *bp)
+{
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+	if (fsi->vol_type == EXFAT)
+		return -ENOTSUPP;
+
+	bp->flag |= DIRTYBIT;
+	return 0;
+#else
+	return -ENOTSUPP;
+#endif
+}
+
+
+
+/* Propagate a modification of FAT sector 'sec': either mark the cache
+ * entry for delayed writeback, or write it (and its FAT2 mirror) now.
+ * Returns 0 on success, -EIO when the entry is missing or I/O fails. */
+s32 fcache_modify(struct super_block *sb, u64 sec)
+{
+	cache_ent_t *bp;
+
+	bp = __fcache_find(sb, sec);
+	if (!bp) {
+		sdfat_fs_error(sb, "Can`t find fcache (sec 0x%016llx)", sec);
+		return -EIO;
+	}
+
+	if (!__mark_delayed_dirty(sb, bp))
+		return 0;
+
+	if (write_sect(sb, sec, bp->bh, 0))
+		return -EIO;
+
+	if (__fat_copy(sb, sec, bp->bh, 0))
+		return -EIO;
+
+	return 0;
+}
+
+/*======================================================================*/
+/* Cache Initialization Functions */
+/*======================================================================*/
+/*
+ * Initialize the FAT cache (fcache) and the meta/buffer cache (dcache).
+ * Every pool entry starts empty (sec == ~0, no bh) on its LRU list, and
+ * every hash bucket starts as a self-linked sentinel before the pool
+ * entries are distributed over the buckets. Always returns 0.
+ */
+s32 meta_cache_init(struct super_block *sb)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	s32 i;
+
+	/* LRU list */
+	fsi->fcache.lru_list.next = &fsi->fcache.lru_list;
+	fsi->fcache.lru_list.prev = fsi->fcache.lru_list.next;
+
+	for (i = 0; i < FAT_CACHE_SIZE; i++) {
+		fsi->fcache.pool[i].sec = ~0;
+		fsi->fcache.pool[i].flag = 0;
+		fsi->fcache.pool[i].bh = NULL;
+		fsi->fcache.pool[i].prev = NULL;
+		fsi->fcache.pool[i].next = NULL;
+		push_to_mru(&(fsi->fcache.pool[i]), &fsi->fcache.lru_list);
+	}
+
+	fsi->dcache.lru_list.next = &fsi->dcache.lru_list;
+	fsi->dcache.lru_list.prev = fsi->dcache.lru_list.next;
+	fsi->dcache.keep_list.next = &fsi->dcache.keep_list;
+	fsi->dcache.keep_list.prev = fsi->dcache.keep_list.next;
+
+	// Initially, all the BUF_CACHEs are in the LRU list
+	for (i = 0; i < BUF_CACHE_SIZE; i++) {
+		fsi->dcache.pool[i].sec = ~0;
+		fsi->dcache.pool[i].flag = 0;
+		fsi->dcache.pool[i].bh = NULL;
+		fsi->dcache.pool[i].prev = NULL;
+		fsi->dcache.pool[i].next = NULL;
+		push_to_mru(&(fsi->dcache.pool[i]), &fsi->dcache.lru_list);
+	}
+
+	/* HASH list : each bucket is a self-linked sentinel */
+	for (i = 0; i < FAT_CACHE_HASH_SIZE; i++) {
+		fsi->fcache.hash_list[i].sec = ~0;
+		fsi->fcache.hash_list[i].hash.next = &(fsi->fcache.hash_list[i]);
+		fsi->fcache.hash_list[i].hash.prev = fsi->fcache.hash_list[i].hash.next;
+	}
+
+	for (i = 0; i < FAT_CACHE_SIZE; i++)
+		__fcache_insert_hash(sb, &(fsi->fcache.pool[i]));
+
+	for (i = 0; i < BUF_CACHE_HASH_SIZE; i++) {
+		fsi->dcache.hash_list[i].sec = ~0;
+		fsi->dcache.hash_list[i].hash.next = &(fsi->dcache.hash_list[i]);
+		fsi->dcache.hash_list[i].hash.prev = fsi->dcache.hash_list[i].hash.next;
+	}
+
+	for (i = 0; i < BUF_CACHE_SIZE; i++)
+		__dcache_insert_hash(sb, &(fsi->dcache.pool[i]));
+
+	return 0;
+}
+
+/* Counterpart of meta_cache_init(); the caches are statically embedded
+ * in FS_INFO_T, so there is nothing to tear down. Always returns 0. */
+s32 meta_cache_shutdown(struct super_block *sb)
+{
+	return 0;
+}
+
+/*======================================================================*/
+/*  FAT Read/Write Functions                                            */
+/*======================================================================*/
+/* Flush every dirty FAT cache entry, then release all buffer_heads and
+ * reset each slot to empty (sec == ~0). Used at unmount/release time.
+ * Returns the last flush error seen, or 0. */
+s32 fcache_release_all(struct super_block *sb)
+{
+	s32 ret = 0;
+	cache_ent_t *bp;
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	s32 dirtycnt = 0;
+
+	bp = fsi->fcache.lru_list.next;
+	while (bp != &fsi->fcache.lru_list) {
+		s32 ret_tmp = __fcache_ent_flush(sb, bp, 0);
+
+		/* remember errors but keep releasing the remaining slots */
+		if (ret_tmp < 0)
+			ret = ret_tmp;
+		else
+			dirtycnt += ret_tmp;
+
+		bp->sec = ~0;
+		bp->flag = 0;
+
+		if (bp->bh) {
+			__brelse(bp->bh);
+			bp->bh = NULL;
+		}
+		bp = bp->next;
+	}
+
+	DMSG("BD:Release / dirty fat cache: %d (err:%d)\n", dirtycnt, ret);
+	return ret;
+}
+
+
+/* internal DIRTYBIT marked => bh dirty
+ * Flush (and optionally sync) every dirty FAT cache entry without
+ * releasing it; stops at the first flush error.
+ * Returns the number flushed by the last iteration, or a negative errno. */
+s32 fcache_flush(struct super_block *sb, u32 sync)
+{
+	s32 ret = 0;
+	cache_ent_t *bp;
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	s32 dirtycnt = 0;
+
+	bp = fsi->fcache.lru_list.next;
+	while (bp != &fsi->fcache.lru_list) {
+		ret = __fcache_ent_flush(sb, bp, sync);
+		if (ret < 0)
+			break;
+
+		dirtycnt += ret;
+		bp = bp->next;
+	}
+
+	MMSG("BD: flush / dirty fat cache: %d (err:%d)\n", dirtycnt, ret);
+	return ret;
+}
+
+/* Look up FAT sector 'sec' in the fcache hash; on a hit, touch the
+ * backing bh (LRU hint for the page cache) and return the entry.
+ * Returns NULL on a miss. */
+static cache_ent_t *__fcache_find(struct super_block *sb, u64 sec)
+{
+	s32 off;
+	cache_ent_t *bp, *hp;
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+	off = (sec + (sec >> fsi->sect_per_clus_bits)) & (FAT_CACHE_HASH_SIZE - 1);
+	hp = &(fsi->fcache.hash_list[off]);
+	for (bp = hp->hash.next; bp != hp; bp = bp->hash.next) {
+		if (bp->sec == sec) {
+			/*
+			 * patch 1.2.4 : for debugging
+			 */
+			WARN(!bp->bh, "[SDFAT] fcache has no bh. "
+				      "It will make system panic.\n");
+
+			touch_buffer(bp->bh);
+			return bp;
+		}
+	}
+	return NULL;
+}
+
+/* Claim a victim slot from the LRU end, skipping dirty entries; if
+ * every entry is dirty, flush the whole cache and retry. The chosen
+ * slot is moved to the MRU end before being returned. */
+static cache_ent_t *__fcache_get(struct super_block *sb)
+{
+	cache_ent_t *bp;
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+	bp = fsi->fcache.lru_list.prev;
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+	while (bp->flag & DIRTYBIT) {
+		cache_ent_t *bp_prev = bp->prev;
+
+		bp = bp_prev;
+		if (bp == &fsi->fcache.lru_list) {
+			DMSG("BD: fat cache flooding\n");
+			fcache_flush(sb, 0);	// flush all dirty FAT caches
+			bp = fsi->fcache.lru_list.prev;
+		}
+	}
+#endif
+//	if (bp->flag & DIRTYBIT)
+//		sync_dirty_buffer(bp->bh);
+
+	move_to_mru(bp, &fsi->fcache.lru_list);
+	return bp;
+}
+
+/* Link bp into the hash bucket derived from bp->sec (same hash as
+ * __fcache_find). */
+static void __fcache_insert_hash(struct super_block *sb, cache_ent_t *bp)
+{
+	s32 off;
+	cache_ent_t *hp;
+	FS_INFO_T *fsi;
+
+	fsi = &(SDFAT_SB(sb)->fsi);
+	off = (bp->sec + (bp->sec >> fsi->sect_per_clus_bits)) & (FAT_CACHE_HASH_SIZE-1);
+
+	hp = &(fsi->fcache.hash_list[off]);
+	bp->hash.next = hp->hash.next;
+	bp->hash.prev = hp;
+	hp->hash.next->hash.prev = bp;
+	hp->hash.next = bp;
+}
+
+
+/* Unlink bp from its hash chain; refuses (with a log message) when the
+ * debug poison shows it was already removed. Dirty entries should have
+ * been flushed first (WARN_ON). */
+static void __fcache_remove_hash(cache_ent_t *bp)
+{
+#ifdef DEBUG_HASH_LIST
+	if ((bp->hash.next == (cache_ent_t *)DEBUG_HASH_NEXT) ||
+		(bp->hash.prev == (cache_ent_t *)DEBUG_HASH_PREV)) {
+		EMSG("%s: FATAL: tried to remove already-removed-cache-entry"
+			"(bp:%p)\n", __func__, bp);
+		return;
+	}
+#endif
+	WARN_ON(bp->flag & DIRTYBIT);
+	__remove_from_hash(bp);
+}
+
+/*======================================================================*/
+/* Buffer Read/Write Functions */
+/*======================================================================*/
+/* Read-ahead a cluster */
+s32 dcache_readahead(struct super_block *sb, u64 sec)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ struct buffer_head *bh;
+ u32 max_ra_count = DCACHE_MAX_RA_SIZE >> sb->s_blocksize_bits;
+ u32 page_ra_count = PAGE_SIZE >> sb->s_blocksize_bits;
+ u32 adj_ra_count = max(fsi->sect_per_clus, page_ra_count);
+ u32 ra_count = min(adj_ra_count, max_ra_count);
+
+ /* Read-ahead is not required */
+ if (fsi->sect_per_clus == 1)
+ return 0;
+
+ if (sec < fsi->data_start_sector) {
+ EMSG("BD: %s: requested sector is invalid(sect:%llu, root:%llu)\n",
+ __func__, sec, fsi->data_start_sector);
+ return -EIO;
+ }
+
+ /* Not sector aligned with ra_count, resize ra_count to page size */
+ if ((sec - fsi->data_start_sector) & (ra_count - 1))
+ ra_count = page_ra_count;
+
+ bh = sb_find_get_block(sb, sec);
+ if (!bh || !buffer_uptodate(bh))
+ bdev_readahead(sb, sec, (u64)ra_count);
+
+ brelse(bh);
+
+ return 0;
+}
+
+/*
+ * Write back a single dentry-cache entry and clear its dirty bit.
+ * returns 1, if bp is flushed
+ * returns 0, if bp is not dirty
+ * returns -EIO, if the underlying write fails
+ */
+static s32 __dcache_ent_flush(struct super_block *sb, cache_ent_t *bp, u32 sync)
+{
+ if (!(bp->flag & DIRTYBIT))
+ return 0;
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+ // Make buffer dirty (XXX: Naive impl.)
+ if (write_sect(sb, bp->sec, bp->bh, 0))
+ return -EIO;
+#endif
+ bp->flag &= ~(DIRTYBIT);
+
+ /* Wait for the buffer to reach the media for a synchronous flush. */
+ if (sync)
+ sync_dirty_buffer(bp->bh);
+
+ return 1;
+}
+
+/* Invalidate a dentry-cache entry: unhash it, drop its buffer_head
+ * reference, and park it on the LRU list for reuse. Always returns 0.
+ * Caller is expected to have flushed the entry first if it was dirty.
+ */
+static s32 __dcache_ent_discard(struct super_block *sb, cache_ent_t *bp)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ MMSG("%s : bp[%p] (sec:%016llx flag:%08x bh:%p) list(prev:%p next:%p) "
+ "hash(prev:%p next:%p)\n", __func__,
+ bp, bp->sec, bp->flag, bp->bh, bp->prev, bp->next,
+ bp->hash.prev, bp->hash.next);
+
+ __dcache_remove_hash(bp);
+ /* ~0 marks the entry as holding no valid sector */
+ bp->sec = ~0;
+ bp->flag = 0;
+
+ if (bp->bh) {
+ __brelse(bp->bh);
+ bp->bh = NULL;
+ }
+
+ move_to_lru(bp, &fsi->dcache.lru_list);
+ return 0;
+}
+
+/* Return a pointer to the cached block data for sector @sec, reading it
+ * from disk into a (possibly recycled) cache entry on a miss.
+ * Returns NULL when the backing device is gone (stale BDI) or when the
+ * read fails; in both cases the entry is discarded.
+ */
+u8 *dcache_getblk(struct super_block *sb, u64 sec)
+{
+ cache_ent_t *bp;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ bp = __dcache_find(sb, sec);
+ if (bp) {
+ if (bdev_check_bdi_valid(sb)) {
+ MMSG("%s: found cache(%p, sect:%llu). But invalid BDI\n"
+ , __func__, bp, sec);
+ __dcache_ent_flush(sb, bp, 0);
+ __dcache_ent_discard(sb, bp);
+ return NULL;
+ }
+
+ if (!(bp->flag & KEEPBIT)) // keep-list entries stay put; promote LRU hits to MRU
+ move_to_mru(bp, &fsi->dcache.lru_list);
+
+ return bp->bh->b_data;
+ }
+
+ /* Miss: recycle the coldest reusable entry and fill it. */
+ bp = __dcache_get(sb);
+
+ if (!__check_hash_valid(bp))
+ __dcache_remove_hash(bp);
+
+ bp->sec = sec;
+ bp->flag = 0;
+ __dcache_insert_hash(sb, bp);
+
+ if (read_sect(sb, sec, &(bp->bh), 1)) {
+ __dcache_ent_discard(sb, bp);
+ return NULL;
+ }
+
+ return bp->bh->b_data;
+
+}
+
+/* Mark the cached sector @sec as modified and write it back.
+ * On FAT volumes with CONFIG_SDFAT_DELAYED_META_DIRTY, only the DIRTYBIT
+ * is set and the actual write is deferred to the flush path; exFAT (and
+ * non-delayed builds) write the buffer immediately.
+ * Returns 0 on success, -EIO if the entry is missing or the write fails.
+ */
+s32 dcache_modify(struct super_block *sb, u64 sec)
+{
+ s32 ret = -EIO;
+ cache_ent_t *bp;
+
+ set_sb_dirty(sb);
+
+ bp = __dcache_find(sb, sec);
+ if (unlikely(!bp)) {
+ sdfat_fs_error(sb, "Can't find dcache (sec 0x%016llx)", sec);
+ return -EIO;
+ }
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+ if (SDFAT_SB(sb)->fsi.vol_type != EXFAT) {
+ bp->flag |= DIRTYBIT;
+ return 0;
+ }
+#endif
+ ret = write_sect(sb, sec, bp->bh, 0);
+
+ if (ret) {
+ DMSG("%s : failed to modify buffer(err:%d, sec:%llu, bp:0x%p)\n",
+ __func__, ret, sec, bp);
+ }
+
+ return ret;
+}
+
+/* Pin the cache entry for sector @sec (set LOCKBIT) so it cannot be
+ * recycled by __dcache_get(). Returns 0 on success, -EIO if the sector
+ * is not cached.
+ */
+s32 dcache_lock(struct super_block *sb, u64 sec)
+{
+ cache_ent_t *bp;
+
+ bp = __dcache_find(sb, sec);
+ if (likely(bp)) {
+ bp->flag |= LOCKBIT;
+ return 0;
+ }
+
+ EMSG("%s : failed to lock buffer(sec:%llu, bp:0x%p)\n", __func__, sec, bp);
+ return -EIO;
+}
+
+/* Unpin the cache entry for sector @sec (clear LOCKBIT), making it
+ * eligible for recycling again. Returns 0 on success, -EIO if the
+ * sector is not cached.
+ */
+s32 dcache_unlock(struct super_block *sb, u64 sec)
+{
+ cache_ent_t *bp;
+
+ bp = __dcache_find(sb, sec);
+ if (likely(bp)) {
+ bp->flag &= ~(LOCKBIT);
+ return 0;
+ }
+
+ EMSG("%s : failed to unlock buffer (sec:%llu, bp:0x%p)\n", __func__, sec, bp);
+ return -EIO;
+}
+
+/* Drop the cache entry for sector @sec, flushing it first if it is
+ * dirty (delayed-dirty builds only). The entry is reset and moved to
+ * the LRU list for reuse.
+ * Returns 0 on success, -ENOENT if not cached, -EIO on flush failure.
+ * NOTE(review): unlike __dcache_ent_discard(), this does not unhash the
+ * entry — presumably callers rely on __check_hash_valid() later; confirm.
+ */
+s32 dcache_release(struct super_block *sb, u64 sec)
+{
+ cache_ent_t *bp;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ bp = __dcache_find(sb, sec);
+ if (unlikely(!bp))
+ return -ENOENT;
+
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+ if (bp->flag & DIRTYBIT) {
+ if (write_sect(sb, bp->sec, bp->bh, 0))
+ return -EIO;
+ }
+#endif
+ bp->sec = ~0;
+ bp->flag = 0;
+
+ if (bp->bh) {
+ __brelse(bp->bh);
+ bp->bh = NULL;
+ }
+
+ move_to_lru(bp, &fsi->dcache.lru_list);
+ return 0;
+}
+
+/* Release every dentry-cache entry: dirty entries are written back
+ * (delayed-dirty builds), all buffer_head references are dropped and
+ * every entry is reset in place.
+ * Returns 0, or -EIO if any writeback failed (the walk continues).
+ */
+s32 dcache_release_all(struct super_block *sb)
+{
+ s32 ret = 0;
+ cache_ent_t *bp;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ s32 dirtycnt = 0;
+
+ /* Drain the keep list into the LRU list first so a single walk of
+ * the LRU list covers every entry.
+ */
+ while (fsi->dcache.keep_list.prev != &fsi->dcache.keep_list) {
+ cache_ent_t *bp_keep = fsi->dcache.keep_list.prev;
+ // bp_keep->flag &= ~(KEEPBIT); // Will be 0-ed later
+ move_to_mru(bp_keep, &fsi->dcache.lru_list);
+ }
+
+ bp = fsi->dcache.lru_list.next;
+ while (bp != &fsi->dcache.lru_list) {
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+ if (bp->flag & DIRTYBIT) {
+ dirtycnt++;
+ if (write_sect(sb, bp->sec, bp->bh, 0))
+ ret = -EIO;
+ }
+#endif
+ bp->sec = ~0;
+ bp->flag = 0;
+
+ if (bp->bh) {
+ __brelse(bp->bh);
+ bp->bh = NULL;
+ }
+ bp = bp->next;
+ }
+
+ DMSG("BD:Release / dirty buf cache: %d (err:%d)", dirtycnt, ret);
+ return ret;
+}
+
+
+/* Write back every dirty dentry-cache entry. Keep-list entries are
+ * moved back onto the LRU list (with KEEPBIT cleared) so one pass over
+ * the LRU list flushes everything. With @sync non-zero each flushed
+ * buffer is also waited on via sync_dirty_buffer().
+ * Returns 0 on success; stops and returns -EIO on the first failed write.
+ */
+s32 dcache_flush(struct super_block *sb, u32 sync)
+{
+ s32 ret = 0;
+ cache_ent_t *bp;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ s32 dirtycnt = 0;
+ s32 keepcnt = 0;
+
+ /* Drain the keep list into the LRU list so a single walk covers
+ * every entry.
+ */
+ while (fsi->dcache.keep_list.prev != &fsi->dcache.keep_list) {
+ cache_ent_t *bp_keep = fsi->dcache.keep_list.prev;
+
+ bp_keep->flag &= ~(KEEPBIT); // Will be 0-ed later
+ move_to_mru(bp_keep, &fsi->dcache.lru_list);
+ keepcnt++;
+ }
+
+ bp = fsi->dcache.lru_list.next;
+ while (bp != &fsi->dcache.lru_list) {
+ if (bp->flag & DIRTYBIT) {
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+ // Make buffer dirty (XXX: Naive impl.)
+ if (write_sect(sb, bp->sec, bp->bh, 0)) {
+ ret = -EIO;
+ break;
+ }
+
+#endif
+ bp->flag &= ~(DIRTYBIT);
+ dirtycnt++;
+
+ if (sync != 0)
+ sync_dirty_buffer(bp->bh);
+ }
+ bp = bp->next;
+ }
+
+ MMSG("BD: flush / dirty dentry cache: %d (%d from keeplist, err:%d)\n",
+ dirtycnt, keepcnt, ret);
+ return ret;
+}
+
+/* Look up sector @sec in the dentry-cache hash table.
+ * On a hit the underlying buffer is touched (LRU freshness for the
+ * kernel buffer cache) and the entry is returned; NULL on a miss.
+ */
+static cache_ent_t *__dcache_find(struct super_block *sb, u64 sec)
+{
+ s32 off;
+ cache_ent_t *bp, *hp;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ /* Same hash as __dcache_insert_hash(); mixes in the cluster number. */
+ off = (sec + (sec >> fsi->sect_per_clus_bits)) & (BUF_CACHE_HASH_SIZE - 1);
+
+ hp = &(fsi->dcache.hash_list[off]);
+ for (bp = hp->hash.next; bp != hp; bp = bp->hash.next) {
+ if (bp->sec == sec) {
+ touch_buffer(bp->bh);
+ return bp;
+ }
+ }
+ return NULL;
+}
+
+/* Pick a dentry-cache entry to recycle, scanning the LRU list from the
+ * cold end. Locked entries are skipped; dirty entries are parked on the
+ * keep list (delayed-dirty builds). If every entry turns out to be
+ * dirty/locked, the cache is flushed and the scan restarts.
+ * The chosen entry is promoted to MRU before being returned.
+ */
+static cache_ent_t *__dcache_get(struct super_block *sb)
+{
+ cache_ent_t *bp;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ bp = fsi->dcache.lru_list.prev;
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+ while (bp->flag & (DIRTYBIT | LOCKBIT)) {
+ cache_ent_t *bp_prev = bp->prev; // hold prev
+
+ if (bp->flag & DIRTYBIT) {
+ MMSG("BD: Buf cache => Keep list\n");
+ bp->flag |= KEEPBIT;
+ move_to_mru(bp, &fsi->dcache.keep_list);
+ }
+ bp = bp_prev;
+
+ /* If all dcaches are dirty */
+ if (bp == &fsi->dcache.lru_list) {
+ DMSG("BD: buf cache flooding\n");
+ dcache_flush(sb, 0);
+ bp = fsi->dcache.lru_list.prev;
+ }
+ }
+#else
+ while (bp->flag & LOCKBIT)
+ bp = bp->prev;
+#endif
+// if (bp->flag & DIRTYBIT)
+// sync_dirty_buffer(bp->bh);
+
+ move_to_mru(bp, &fsi->dcache.lru_list);
+ return bp;
+}
+
+/* Insert @bp at the head of its dentry-cache hash bucket.
+ * The bucket index mixes the sector number with its cluster number
+ * (sec >> sect_per_clus_bits) and must match __dcache_find().
+ */
+static void __dcache_insert_hash(struct super_block *sb, cache_ent_t *bp)
+{
+ s32 off;
+ cache_ent_t *hp;
+ FS_INFO_T *fsi;
+
+ fsi = &(SDFAT_SB(sb)->fsi);
+ off = (bp->sec + (bp->sec >> fsi->sect_per_clus_bits)) & (BUF_CACHE_HASH_SIZE-1);
+
+ hp = &(fsi->dcache.hash_list[off]);
+ bp->hash.next = hp->hash.next;
+ bp->hash.prev = hp;
+ hp->hash.next->hash.prev = bp;
+ hp->hash.next = bp;
+}
+
+/*
+ * Detach @bp from the dentry-cache hash list.
+ * The entry must be clean; removing a DIRTYBIT entry would lose a
+ * pending metadata write, hence the WARN_ON.
+ */
+static void __dcache_remove_hash(cache_ent_t *bp)
+{
+#ifdef DEBUG_HASH_LIST
+ /* Poisoned hash links mean this entry was already removed once. */
+ if ((bp->hash.next == (cache_ent_t *)DEBUG_HASH_NEXT) ||
+ (bp->hash.prev == (cache_ent_t *)DEBUG_HASH_PREV)) {
+ EMSG("%s: FATAL: tried to remove already-removed-cache-entry"
+ "(bp:%p)\n", __func__, bp);
+ return;
+ }
+#endif
+ WARN_ON(bp->flag & DIRTYBIT);
+ __remove_from_hash(bp);
+}
+
+
+/* end of cache.c */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDFAT_CONFIG_H
+#define _SDFAT_CONFIG_H
+/*======================================================================*/
+/* */
+/* FFS CONFIGURATIONS */
+/* (CHANGE THIS PART IF REQUIRED) */
+/* */
+/*======================================================================*/
+
+/*----------------------------------------------------------------------*/
+/* Feature Config */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* Debug/Experimental Config */
+/*----------------------------------------------------------------------*/
+//#define CONFIG_SDFAT_TRACE_IO
+//#define CONFIG_SDFAT_TRACE_LOCK /* Trace elapsed time in lock_super(sb) */
+
+/*----------------------------------------------------------------------*/
+/* Defragmentation Config */
+/*----------------------------------------------------------------------*/
+//#define CONFIG_SDFAT_DFR
+//#define CONFIG_SDFAT_DFR_PACKING
+//#define CONFIG_SDFAT_DFR_DEBUG
+
+/*----------------------------------------------------------------------*/
+/* Config for Kernel equal or newer than 3.7 */
+/*----------------------------------------------------------------------*/
+#ifndef CONFIG_SDFAT_WRITE_SB_INTERVAL_CSECS
+#define CONFIG_SDFAT_WRITE_SB_INTERVAL_CSECS (dirty_writeback_interval)
+#endif
+
+/*----------------------------------------------------------------------*/
+/* Default Kconfig */
+/*----------------------------------------------------------------------*/
+/* default mount options */
+#ifndef CONFIG_SDFAT_DEFAULT_CODEPAGE /* if Kconfig lacked codepage */
+#define CONFIG_SDFAT_DEFAULT_CODEPAGE 437
+#endif
+
+#ifndef CONFIG_SDFAT_DEFAULT_IOCHARSET /* if Kconfig lacked iocharset */
+#define CONFIG_SDFAT_DEFAULT_IOCHARSET "utf8"
+#endif
+
+#ifndef CONFIG_SDFAT_FAT32_SHORTNAME_SEQ /* Shortname ~1, ... ~9 have higher
+ * priority (WIN32/VFAT-like)
+ */
+//#define CONFIG_SDFAT_FAT32_SHORTNAME_SEQ
+#endif
+
+#ifndef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
+//#define CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
+#endif
+
+#ifndef CONFIG_SDFAT_FAT_MIRRORING /* if Kconfig lacked fat-mirroring option */
+#define CONFIG_SDFAT_FAT_MIRRORING /* Write FAT 1, FAT 2 simultaneously */
+#endif
+
+#ifndef CONFIG_SDFAT_DELAYED_META_DIRTY
+//#define CONFIG_SDFAT_DELAYED_META_DIRTY /* delayed DIR/FAT dirty support */
+#endif
+
+#ifndef CONFIG_SDFAT_SUPPORT_DIR_SYNC
+//#define CONFIG_SDFAT_SUPPORT_DIR_SYNC /* support DIR_SYNC */
+#endif
+
+#ifndef CONFIG_SDFAT_CHECK_RO_ATTR
+//#define CONFIG_SDFAT_CHECK_RO_ATTR
+#endif
+
+#ifndef CONFIG_SDFAT_RESTRICT_EXT_ONLY_SFN
+#define CONFIG_SDFAT_RESTRICT_EXT_ONLY_SFN
+#endif
+
+#ifndef CONFIG_SDFAT_ALLOW_LOOKUP_LOSSY_SFN
+//#define CONFIG_SDFAT_ALLOW_LOOKUP_LOSSY_SFN
+#endif
+
+#ifndef CONFIG_SDFAT_DBG_SHOW_PID
+//#define CONFIG_SDFAT_DBG_SHOW_PID
+#endif
+
+#ifndef CONFIG_SDFAT_VIRTUAL_XATTR
+//#define CONFIG_SDFAT_VIRTUAL_XATTR
+#endif
+
+#ifndef CONFIG_SDFAT_SUPPORT_STLOG
+//#define CONFIG_SDFAT_SUPPORT_STLOG
+#endif
+
+#ifndef CONFIG_SDFAT_DEBUG
+//{
+//#define CONFIG_SDFAT_DEBUG
+
+#ifndef CONFIG_SDFAT_DBG_IOCTL
+//#define CONFIG_SDFAT_DBG_IOCTL
+#endif
+
+#ifndef CONFIG_SDFAT_DBG_MSG
+//#define CONFIG_SDFAT_DBG_MSG
+#endif
+
+#ifndef CONFIG_SDFAT_DBG_CAREFUL
+//#define CONFIG_SDFAT_DBG_CAREFUL
+#endif
+
+#ifndef CONFIG_SDFAT_DBG_BUGON
+//#define CONFIG_SDFAT_DBG_BUGON
+#endif
+
+#ifndef CONFIG_SDFAT_DBG_WARNON
+//#define CONFIG_SDFAT_DBG_WARNON
+#endif
+//}
+#endif /* CONFIG_SDFAT_DEBUG */
+
+
+#ifndef CONFIG_SDFAT_TRACE_SB_LOCK
+//#define CONFIG_SDFAT_TRACE_SB_LOCK
+#endif
+
+#ifndef CONFIG_SDFAT_TRACE_ELAPSED_TIME
+//#define CONFIG_SDFAT_TRACE_ELAPSED_TIME
+#endif
+
+#endif /* _SDFAT_CONFIG_H */
+
+/* end of config.h */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : core.c */
+/* PURPOSE : FAT & exFAT common core code for sdFAT */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/workqueue.h>
+#include <linux/writeback.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
+#include <linux/iversion.h>
+#endif
+
+#include "sdfat.h"
+#include "core.h"
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+
+/*************************************************************************
+ * FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
+ *************************************************************************/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
+/* Compatibility shim: kernels before 4.16 have no inode_peek_iversion()
+ * helper (added with <linux/iversion.h>), so read i_version directly.
+ */
+static inline u64 inode_peek_iversion(struct inode *inode)
+{
+ return inode->i_version;
+}
+#endif
+
+
+/*----------------------------------------------------------------------*/
+/* Constant & Macro Definitions */
+/*----------------------------------------------------------------------*/
+/* Mark the superblock dirty. On kernels >= 3.7 (which removed s_dirt
+ * handling) this also queues the delayed write_super work item, at most
+ * once, after CONFIG_SDFAT_WRITE_SB_INTERVAL_CSECS centiseconds.
+ */
+static inline void __set_sb_dirty(struct super_block *sb)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+ sb->s_dirt = 1;
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) */
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ sbi->s_dirt = 1;
+ /* Insert work */
+ spin_lock(&sbi->work_lock);
+ if (!sbi->write_super_queued) {
+ unsigned long delay;
+
+ /* interval is in centiseconds, hence the *10 to milliseconds */
+ delay = msecs_to_jiffies(CONFIG_SDFAT_WRITE_SB_INTERVAL_CSECS * 10);
+ queue_delayed_work(system_long_wq, &sbi->write_super_work, delay);
+ sbi->write_super_queued = 1;
+ }
+ spin_unlock(&sbi->work_lock);
+#endif
+}
+
+/* Public wrapper around __set_sb_dirty(). */
+void set_sb_dirty(struct super_block *sb)
+{
+ __set_sb_dirty(sb);
+ // XXX: to be removed later, prints too much output
+ //TMSG("%s finished.\n", __func__);
+}
+
+/*----------------------------------------------------------------------*/
+/* Global Variable Definitions */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* Local Variable Definitions */
+/*----------------------------------------------------------------------*/
+
+/* DOS/Windows reserved device names, space-padded to the 8-character
+ * short-name base field; NULL-terminated. Presumably matched against
+ * candidate short names to reject them — confirm at the call site.
+ */
+static s8 *reserved_names[] = {
+ "AUX ", "CON ", "NUL ", "PRN ",
+ "COM1 ", "COM2 ", "COM3 ", "COM4 ",
+ "COM5 ", "COM6 ", "COM7 ", "COM8 ", "COM9 ",
+ "LPT1 ", "LPT2 ", "LPT3 ", "LPT4 ",
+ "LPT5 ", "LPT6 ", "LPT7 ", "LPT8 ", "LPT9 ",
+ NULL
+};
+
+/*======================================================================*/
+/* Local Function Definitions */
+/*======================================================================*/
+
+/*
+ * File System Management Functions
+ */
+
+/* Sanity-check that every on-disk dentry structure compiles to exactly
+ * DENTRY_SIZE bytes (i.e. packing/alignment is as the format requires).
+ * Returns 0 when all sizes match, -EINVAL otherwise.
+ */
+static s32 check_type_size(void)
+{
+ /* critical check for system requirement on size of DENTRY_T structure */
+ if (sizeof(DENTRY_T) != DENTRY_SIZE)
+ return -EINVAL;
+
+ if (sizeof(DOS_DENTRY_T) != DENTRY_SIZE)
+ return -EINVAL;
+
+ if (sizeof(EXT_DENTRY_T) != DENTRY_SIZE)
+ return -EINVAL;
+
+ if (sizeof(FILE_DENTRY_T) != DENTRY_SIZE)
+ return -EINVAL;
+
+ if (sizeof(STRM_DENTRY_T) != DENTRY_SIZE)
+ return -EINVAL;
+
+ if (sizeof(NAME_DENTRY_T) != DENTRY_SIZE)
+ return -EINVAL;
+
+ if (sizeof(BMAP_DENTRY_T) != DENTRY_SIZE)
+ return -EINVAL;
+
+ if (sizeof(CASE_DENTRY_T) != DENTRY_SIZE)
+ return -EINVAL;
+
+ if (sizeof(VOLM_DENTRY_T) != DENTRY_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Record @new_flag as the volume state and write it into the cached boot
+ * sector: the VolumeFlags field for exFAT, or the dirty-state byte for
+ * FAT32/FAT16/FAT12. No-op when the flag is unchanged or the volume is
+ * mounted read-only. With @always_sync the write is synchronous; it is
+ * also forced synchronous when first marking the volume dirty.
+ * Returns 0 on success or a negative errno from the sector I/O.
+ */
+static s32 __fs_set_vol_flags(struct super_block *sb, u16 new_flag, s32 always_sync)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ s32 err;
+ s32 sync = 0;
+
+ /* flags are not changed */
+ if (fsi->vol_flag == new_flag)
+ return 0;
+
+ fsi->vol_flag = new_flag;
+
+ /* skip updating volume dirty flag,
+ * if this volume has been mounted with read-only
+ */
+ if (sb_rdonly(sb))
+ return 0;
+
+ /* Lazily read the boot sector the first time it is needed. */
+ if (!fsi->pbr_bh) {
+ err = read_sect(sb, 0, &(fsi->pbr_bh), 1);
+ if (err) {
+ EMSG("%s : failed to read boot sector\n", __func__);
+ return err;
+ }
+ }
+
+ if (fsi->vol_type == EXFAT) {
+ pbr64_t *bpb = (pbr64_t *)fsi->pbr_bh->b_data;
+ bpb->bsx.vol_flags = cpu_to_le16(new_flag);
+ } else if (fsi->vol_type == FAT32) {
+ pbr32_t *bpb = (pbr32_t *)fsi->pbr_bh->b_data;
+ bpb->bsx.state = new_flag & VOL_DIRTY ? FAT_VOL_DIRTY : 0x00;
+ } else { /* FAT16/12 */
+ pbr16_t *bpb = (pbr16_t *) fsi->pbr_bh->b_data;
+ bpb->bpb.f16.state = new_flag & VOL_DIRTY ?
+ FAT_VOL_DIRTY : 0x00;
+ }
+
+ if (always_sync)
+ sync = 1;
+ else if ((new_flag == VOL_DIRTY) && (!buffer_dirty(fsi->pbr_bh)))
+ sync = 1;
+ else
+ sync = 0;
+
+ err = write_sect(sb, 0, fsi->pbr_bh, sync);
+ if (err)
+ EMSG("%s : failed to modify volume flag\n", __func__);
+
+ return err;
+}
+
+/* Asynchronous (best-effort) variant of __fs_set_vol_flags(). */
+static s32 fs_set_vol_flags(struct super_block *sb, u16 new_flag)
+{
+ return __fs_set_vol_flags(sb, new_flag, 0);
+}
+
+/* Exported entry point letting callers choose synchronous writeback. */
+s32 fscore_set_vol_flags(struct super_block *sb, u16 new_flag, s32 always_sync)
+{
+ return __fs_set_vol_flags(sb, new_flag, always_sync);
+}
+
+/* Flush delayed-dirty FAT and dentry caches for FAT volumes; exFAT (and
+ * builds without CONFIG_SDFAT_DELAYED_META_DIRTY) keep metadata written
+ * through, so nothing is needed. Always returns 0.
+ */
+static inline s32 __fs_meta_sync(struct super_block *sb, s32 do_sync)
+{
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ if (fsi->vol_type != EXFAT) {
+ MMSG("meta flush in fs_sync(sync=%d)\n", do_sync);
+ fcache_flush(sb, 0);
+ dcache_flush(sb, 0);
+ }
+#else
+ /* DO NOTHING */
+#endif
+ return 0;
+}
+
+/* Synchronize in-core metadata caches and then the block device itself.
+ * A no-op when @do_sync is zero. Returns 0 on success or the first
+ * error from the metadata flush / device sync.
+ */
+static s32 fs_sync(struct super_block *sb, s32 do_sync)
+{
+ s32 err;
+
+ if (!do_sync)
+ return 0;
+
+ err = __fs_meta_sync(sb, do_sync);
+
+ if (!err)
+ err = bdev_sync_all(sb);
+
+ if (err)
+ EMSG("%s : failed to sync. (err:%d)\n", __func__, err);
+
+ return err;
+}
+
+/*
+ * Cluster Management Functions
+ */
+
+/* Zero-fill all sectors of cluster @clu (or the whole FAT16 root
+ * directory area when @clu is the free-cluster marker).
+ * DIRSYNC inodes first try a direct synchronous multi-sector zero write;
+ * on -EAGAIN (or without DIRSYNC) each sector is zeroed through the
+ * buffer cache instead. Returns 0 on success or a negative errno.
+ */
+static s32 __clear_cluster(struct inode *inode, u32 clu)
+{
+ u64 s, n;
+ struct super_block *sb = inode->i_sb;
+ u32 sect_size = (u32)sb->s_blocksize;
+ s32 ret = 0;
+ struct buffer_head *tmp_bh = NULL;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ if (IS_CLUS_FREE(clu)) { /* FAT16 root_dir */
+ s = fsi->root_start_sector;
+ n = fsi->data_start_sector;
+ } else {
+ s = CLUS_TO_SECT(fsi, clu);
+ n = s + fsi->sect_per_clus;
+ }
+
+ if (IS_DIRSYNC(inode)) {
+ ret = write_msect_zero(sb, s, (u64)fsi->sect_per_clus);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
+ /* Trying buffered zero writes
+ * if it doesn't have DIRSYNC or write_msect_zero() returned -EAGAIN
+ */
+ for ( ; s < n; s++) {
+#if 0
+ dcache_release(sb, s);
+#endif
+ ret = read_sect(sb, s, &tmp_bh, 0);
+ if (ret)
+ goto out;
+
+ memset((u8 *)tmp_bh->b_data, 0x0, sect_size);
+ ret = write_sect(sb, s, tmp_bh, 0);
+ if (ret)
+ goto out;
+ }
+out:
+ brelse(tmp_bh);
+ return ret;
+} /* end of __clear_cluster */
+
+/* Find the last cluster of chain @p_chain and store it in @ret_clu.
+ * Contiguous chains (flags == 0x03, exFAT no-FAT-chain) are computed
+ * arithmetically; otherwise the FAT chain is walked and its length is
+ * cross-checked against p_chain->size.
+ * Returns 0 on success, -EIO on FAT read failure or a size mismatch.
+ */
+static s32 __find_last_cluster(struct super_block *sb, CHAIN_T *p_chain, u32 *ret_clu)
+{
+ u32 clu, next;
+ u32 count = 0;
+
+ next = p_chain->dir;
+ if (p_chain->flags == 0x03) {
+ *ret_clu = next + p_chain->size - 1;
+ return 0;
+ }
+
+ do {
+ count++;
+ clu = next;
+ if (fat_ent_get_safe(sb, clu, &next))
+ return -EIO;
+ } while (!IS_CLUS_EOF(next));
+
+ if (p_chain->size != count) {
+ sdfat_fs_error(sb, "bogus directory size "
+ "(clus : ondisk(%d) != counted(%d))",
+ p_chain->size, count);
+ sdfat_debug_bug_on(1);
+ return -EIO;
+ }
+
+ *ret_clu = clu;
+ return 0;
+}
+
+
+/* Count the clusters in chain @p_chain into @ret_count.
+ * Empty/EOF chains count as 0; contiguous chains (flags == 0x03) use the
+ * stored size. Otherwise the FAT chain is walked; the loop bound on the
+ * total cluster count guards against cyclic (corrupt) chains.
+ * Returns 0 on success, -EIO on FAT read failure.
+ */
+static s32 __count_num_clusters(struct super_block *sb, CHAIN_T *p_chain, u32 *ret_count)
+{
+ u32 i, count;
+ u32 clu;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ if (!p_chain->dir || IS_CLUS_EOF(p_chain->dir)) {
+ *ret_count = 0;
+ return 0;
+ }
+
+ if (p_chain->flags == 0x03) {
+ *ret_count = p_chain->size;
+ return 0;
+ }
+
+ clu = p_chain->dir;
+ count = 0;
+ for (i = CLUS_BASE; i < fsi->num_clusters; i++) {
+ count++;
+ if (fat_ent_get_safe(sb, clu, &clu))
+ return -EIO;
+ if (IS_CLUS_EOF(clu))
+ break;
+ }
+
+ *ret_count = count;
+ return 0;
+}
+
+/*
+ * Upcase table Management Functions
+ */
+/* Free the two-level upcase table: every allocated column, then the
+ * column-pointer array itself, clearing fsi->vol_utbl. Safe to call on
+ * a partially built table (kfree(NULL) is a no-op).
+ */
+static void free_upcase_table(struct super_block *sb)
+{
+ u32 i;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ u16 **upcase_table;
+
+ upcase_table = fsi->vol_utbl;
+ for (i = 0 ; i < UTBL_COL_COUNT ; i++) {
+ /* kfree(NULL) is safe */
+ kfree(upcase_table[i]);
+ upcase_table[i] = NULL;
+ }
+
+ /* kfree(NULL) is safe */
+ kfree(fsi->vol_utbl);
+ fsi->vol_utbl = NULL;
+}
+
+/* Load the exFAT up-case table from disk (@num_sectors starting at
+ * @sector) into a sparse two-level array: columns are allocated only
+ * where a mapping differs from identity. The on-disk format is
+ * run-length compressed: an 0xFFFF marker means the next value is a
+ * count of identity-mapped codepoints to skip. A rolling checksum over
+ * the raw bytes is verified against @utbl_checksum.
+ * Returns 0 on success; on any failure the partial table is freed and
+ * a negative errno (-EIO/-ENOMEM/-EINVAL) is returned.
+ */
+static s32 __load_upcase_table(struct super_block *sb, u64 sector, u64 num_sectors, u32 utbl_checksum)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ struct buffer_head *tmp_bh = NULL;
+ u32 sect_size = (u32)sb->s_blocksize;
+ s32 ret = -EIO;
+ u32 i, j;
+
+ u8 skip = false;
+ u32 index = 0;
+ u32 checksum = 0;
+ u16 **upcase_table = kzalloc((UTBL_COL_COUNT * sizeof(u16 *)), GFP_KERNEL);
+
+ if (!upcase_table)
+ return -ENOMEM;
+ /* thanks for kzalloc
+ * memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
+ */
+
+ fsi->vol_utbl = upcase_table;
+ num_sectors += sector;
+
+ while (sector < num_sectors) {
+ ret = read_sect(sb, sector, &tmp_bh, 1);
+ if (ret) {
+ EMSG("%s: failed to read sector(0x%llx)\n",
+ __func__, sector);
+ goto error;
+ }
+ sector++;
+
+ for (i = 0; i < sect_size && index <= 0xFFFF; i += 2) {
+ /* FIXME : is __le16 ok? */
+ //u16 uni = le16_to_cpu(((__le16*)(tmp_bh->b_data))[i]);
+ u16 uni = get_unaligned_le16((u8 *)tmp_bh->b_data+i);
+
+ /* exFAT table checksum: rotate right by one bit, add byte */
+ checksum = ((checksum & 1) ? 0x80000000 : 0) +
+ (checksum >> 1) + *(((u8 *)tmp_bh->b_data)+i);
+ checksum = ((checksum & 1) ? 0x80000000 : 0) +
+ (checksum >> 1) + *(((u8 *)tmp_bh->b_data)+(i+1));
+
+ if (skip) {
+ MMSG("skip from 0x%X to 0x%X(amount of 0x%X)\n",
+ index, index+uni, uni);
+ index += uni;
+ skip = false;
+ } else if (uni == index) {
+ index++;
+ } else if (uni == 0xFFFF) {
+ skip = true;
+ } else { /* uni != index , uni != 0xFFFF */
+ u16 col_index = get_col_index(index);
+
+ if (!upcase_table[col_index]) {
+ /* New column: initialize to identity mapping first. */
+ upcase_table[col_index] =
+ kmalloc((UTBL_ROW_COUNT * sizeof(u16)), GFP_KERNEL);
+ if (!upcase_table[col_index]) {
+ EMSG("failed to allocate memory"
+ " for column 0x%X\n",
+ col_index);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (j = 0; j < UTBL_ROW_COUNT; j++)
+ upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j;
+ }
+
+ upcase_table[col_index][get_row_index(index)] = uni;
+ index++;
+ }
+ }
+ }
+ if (index >= 0xFFFF && utbl_checksum == checksum) {
+ DMSG("%s: load upcase table successfully"
+ "(idx:0x%08x, utbl_chksum:0x%08x)\n",
+ __func__, index, utbl_checksum);
+ if (tmp_bh)
+ brelse(tmp_bh);
+ return 0;
+ }
+
+ EMSG("%s: failed to load upcase table"
+ "(idx:0x%08x, chksum:0x%08x, utbl_chksum:0x%08x)\n",
+ __func__, index, checksum, utbl_checksum);
+
+ ret = -EINVAL;
+error:
+ if (tmp_bh)
+ brelse(tmp_bh);
+ free_upcase_table(sb);
+ return ret;
+}
+
+/* Build the upcase table from the built-in compressed default table
+ * (uni_def_upcase) instead of the on-disk one. Uses the same run-length
+ * scheme as __load_upcase_table() (0xFFFF marker = skip run) but needs
+ * no checksum. Returns 0 on success, -ENOMEM/-EIO on failure (partial
+ * table is freed).
+ */
+static s32 __load_default_upcase_table(struct super_block *sb)
+{
+ s32 i, ret = -EIO;
+ u32 j;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ u8 skip = false;
+ u32 index = 0;
+ u16 uni = 0;
+ u16 **upcase_table;
+
+ upcase_table = kmalloc((UTBL_COL_COUNT * sizeof(u16 *)), GFP_KERNEL);
+ if (!upcase_table)
+ return -ENOMEM;
+
+ fsi->vol_utbl = upcase_table;
+ memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
+
+ for (i = 0; index <= 0xFFFF && i < SDFAT_NUM_UPCASE*2; i += 2) {
+ /* FIXME : is __le16 ok? */
+ //uni = le16_to_cpu(((__le16*)uni_def_upcase)[i>>1]);
+ uni = get_unaligned_le16((u8 *)uni_def_upcase+i);
+ if (skip) {
+ MMSG("skip from 0x%x ", index);
+ index += uni;
+ MMSG("to 0x%x (amount of 0x%x)\n", index, uni);
+ skip = false;
+ } else if (uni == index) {
+ index++;
+ } else if (uni == 0xFFFF) {
+ skip = true;
+ } else { /* uni != index , uni != 0xFFFF */
+ u16 col_index = get_col_index(index);
+
+ if (!upcase_table[col_index]) {
+ /* New column: initialize to identity mapping first. */
+ upcase_table[col_index] = kmalloc((UTBL_ROW_COUNT * sizeof(u16)), GFP_KERNEL);
+ if (!upcase_table[col_index]) {
+ EMSG("failed to allocate memory for "
+ "new column 0x%x\n", col_index);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (j = 0; j < UTBL_ROW_COUNT; j++)
+ upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j;
+ }
+
+ upcase_table[col_index][get_row_index(index)] = uni;
+ index++;
+ }
+ }
+
+ if (index >= 0xFFFF)
+ return 0;
+
+error:
+ /* FATAL error: default upcase table has error */
+ free_upcase_table(sb);
+ return ret;
+}
+
+/* Locate the TYPE_UPCASE entry in the exFAT root directory and load the
+ * up-case table it points to; fall back to the built-in default table on
+ * non-exFAT volumes, when no entry is found, or when the on-disk table
+ * is invalid (any error other than -EIO from the loader).
+ * Returns 0 on success or a negative errno.
+ */
+static s32 load_upcase_table(struct super_block *sb)
+{
+ s32 i, ret;
+ u32 tbl_clu, type;
+ u64 sector, tbl_size, num_sectors;
+ u8 blksize_bits = sb->s_blocksize_bits;
+ CHAIN_T clu;
+ CASE_DENTRY_T *ep;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ clu.dir = fsi->root_dir;
+ clu.flags = 0x01;
+
+ if (fsi->vol_type != EXFAT)
+ goto load_default;
+
+ while (!IS_CLUS_EOF(clu.dir)) {
+ for (i = 0; i < fsi->dentries_per_clu; i++) {
+ ep = (CASE_DENTRY_T *) get_dentry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -EIO;
+
+ type = fsi->fs_func->get_entry_type((DENTRY_T *) ep);
+
+ /* TYPE_UNUSED marks the end of the directory */
+ if (type == TYPE_UNUSED)
+ break;
+ if (type != TYPE_UPCASE)
+ continue;
+
+ tbl_clu = le32_to_cpu(ep->start_clu);
+ tbl_size = le64_to_cpu(ep->size);
+
+ sector = CLUS_TO_SECT(fsi, tbl_clu);
+ /* round the byte size up to whole sectors */
+ num_sectors = ((tbl_size - 1) >> blksize_bits) + 1;
+ ret = __load_upcase_table(sb, sector, num_sectors,
+ le32_to_cpu(ep->checksum));
+
+ if (ret && (ret != -EIO))
+ goto load_default;
+
+ /* load successfully */
+ return ret;
+ }
+
+ if (get_next_clus_safe(sb, &(clu.dir)))
+ return -EIO;
+ }
+
+load_default:
+ sdfat_log_msg(sb, KERN_INFO, "trying to load default upcase table");
+ /* load default upcase table */
+ return __load_default_upcase_table(sb);
+} /* end of load_upcase_table */
+
+
+/*
+ * Directory Entry Management Functions
+ */
+/* Resolve the cluster containing @byte_offset within directory chain
+ * @p_dir. Contiguous chains (flags == 0x03) are advanced arithmetically;
+ * otherwise the FAT chain is walked cluster by cluster. The result is
+ * stored in *@clu when @clu is non-NULL.
+ * Returns 0 on success, -EIO on FAT read failure or premature EOF.
+ */
+s32 walk_fat_chain(struct super_block *sb, CHAIN_T *p_dir, u32 byte_offset, u32 *clu)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ u32 clu_offset;
+ u32 cur_clu;
+
+ clu_offset = byte_offset >> fsi->cluster_size_bits;
+ cur_clu = p_dir->dir;
+
+ if (p_dir->flags == 0x03) {
+ cur_clu += clu_offset;
+ } else {
+ while (clu_offset > 0) {
+ if (get_next_clus_safe(sb, &cur_clu))
+ return -EIO;
+ if (IS_CLUS_EOF(cur_clu)) {
+ sdfat_fs_error(sb, "invalid dentry access "
+ "beyond EOF (clu : %u, eidx : %d)",
+ p_dir->dir,
+ byte_offset >> DENTRY_SIZE_BITS);
+ return -EIO;
+ }
+ clu_offset--;
+ }
+ }
+
+ if (clu)
+ *clu = cur_clu;
+ return 0;
+}
+
+/* Translate directory-entry index @entry within @p_dir into an absolute
+ * device sector (*@sector) and byte offset within that sector (*@offset).
+ * Handles the fixed FAT16 root directory area (free-cluster marker)
+ * specially; otherwise walks the chain to the owning cluster.
+ * Returns 0 on success or -EIO from walk_fat_chain().
+ */
+static s32 find_location(struct super_block *sb, CHAIN_T *p_dir, s32 entry, u64 *sector, s32 *offset)
+{
+ s32 ret;
+ u32 off, clu = 0;
+ u32 blksize_mask = (u32)(sb->s_blocksize-1);
+ u8 blksize_bits = sb->s_blocksize_bits;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ off = entry << DENTRY_SIZE_BITS;
+
+ /* FAT16 root_dir */
+ if (IS_CLUS_FREE(p_dir->dir)) {
+ *offset = off & blksize_mask;
+ *sector = off >> blksize_bits;
+ *sector += fsi->root_start_sector;
+ return 0;
+ }
+
+ ret = walk_fat_chain(sb, p_dir, off, &clu);
+ if (ret)
+ return ret;
+
+ /* byte offset in cluster */
+ off &= (fsi->cluster_size - 1);
+
+ /* byte offset in sector */
+ *offset = off & blksize_mask;
+
+ /* sector offset in cluster */
+ *sector = off >> blksize_bits;
+ *sector += CLUS_TO_SECT(fsi, clu);
+ return 0;
+} /* end of find_location */
+
+/* Return a pointer into the dentry cache for entry @entry of directory
+ * @p_dir, optionally storing the owning sector in *@sector. Triggers a
+ * per-page directory read-ahead on page-aligned entry indices (except
+ * for the FAT12/16 root area). Returns NULL on any lookup or I/O error,
+ * or when the directory has been deleted.
+ */
+DENTRY_T *get_dentry_in_dir(struct super_block *sb, CHAIN_T *p_dir, s32 entry, u64 *sector)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ u32 dentries_per_page = PAGE_SIZE >> DENTRY_SIZE_BITS;
+ s32 off;
+ u64 sec;
+ u8 *buf;
+
+ if (p_dir->dir == DIR_DELETED) {
+ EMSG("%s : abnormal access to deleted dentry\n", __func__);
+ BUG_ON(!fsi->prev_eio);
+ return NULL;
+ }
+
+ if (find_location(sb, p_dir, entry, &sec, &off))
+ return NULL;
+
+ /* DIRECTORY READAHEAD :
+ * Try to read ahead per a page except root directory of fat12/16
+ */
+ if ((!IS_CLUS_FREE(p_dir->dir)) &&
+ !(entry & (dentries_per_page - 1)))
+ dcache_readahead(sb, sec);
+
+ buf = dcache_getblk(sb, sec);
+ if (!buf)
+ return NULL;
+
+ if (sector)
+ *sector = sec;
+ return (DENTRY_T *)(buf + off);
+} /* end of get_dentry_in_dir */
+
+/* used only in search empty_slot() */
+#define CNT_UNUSED_NOHIT (-1)
+#define CNT_UNUSED_HIT (-2)
+/* search EMPTY CONTINUOUS "num_entries" entries */
+/* Find @num_entries consecutive free (unused or deleted) dentry slots in
+ * @p_dir, resuming from @hint_femp when it holds a previous position.
+ * Returns the index of the run on success (for exFAT the FIRST slot,
+ * for FAT the LAST slot of the run), -ENOSPC when the directory has no
+ * such run, -EIO on read error or corruption.
+ * As a side effect @hint_femp is updated: it records the start of a
+ * candidate empty run for the next call and is invalidated (-1) on a hit.
+ */
+static s32 search_empty_slot(struct super_block *sb, HINT_FEMP_T *hint_femp, CHAIN_T *p_dir, s32 num_entries)
+{
+ s32 i, dentry, num_empty = 0;
+ s32 dentries_per_clu;
+ u32 type;
+ CHAIN_T clu;
+ DENTRY_T *ep;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ if (IS_CLUS_FREE(p_dir->dir)) /* FAT16 root_dir */
+ dentries_per_clu = fsi->dentries_in_root;
+ else
+ dentries_per_clu = fsi->dentries_per_clu;
+
+ ASSERT(-1 <= hint_femp->eidx);
+
+ if (hint_femp->eidx != -1) {
+ clu.dir = hint_femp->cur.dir;
+ clu.size = hint_femp->cur.size;
+ clu.flags = hint_femp->cur.flags;
+
+ dentry = hint_femp->eidx;
+
+ if (num_entries <= hint_femp->count) {
+ MMSG("%s: empty slot(HIT) - found "
+ "(clu : 0x%08x eidx : %d)\n",
+ __func__, hint_femp->cur.dir, hint_femp->eidx);
+ hint_femp->eidx = -1;
+
+ if (fsi->vol_type == EXFAT)
+ return dentry;
+
+ return dentry + (num_entries - 1);
+ }
+ MMSG("%s: empty slot(HIT) - search from "
+ "(clu : 0x%08x eidx : %d)\n",
+ __func__, hint_femp->cur.dir, hint_femp->eidx);
+ } else {
+ MMSG("%s: empty slot(MISS) - search from "
+ "(clu:0x%08x eidx : 0)\n",
+ __func__, p_dir->dir);
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ dentry = 0;
+ }
+
+ while (!IS_CLUS_EOF(clu.dir)) {
+ /* FAT16 root_dir */
+ if (IS_CLUS_FREE(p_dir->dir))
+ i = dentry % dentries_per_clu;
+ else
+ i = dentry & (dentries_per_clu-1);
+
+ for ( ; i < dentries_per_clu; i++, dentry++) {
+ ep = get_dentry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -EIO;
+
+ type = fsi->fs_func->get_entry_type(ep);
+
+ if ((type == TYPE_UNUSED) || (type == TYPE_DELETED)) {
+ num_empty++;
+ if (hint_femp->eidx == -1) {
+ hint_femp->eidx = dentry;
+ hint_femp->count = CNT_UNUSED_NOHIT;
+
+ hint_femp->cur.dir = clu.dir;
+ hint_femp->cur.size = clu.size;
+ hint_femp->cur.flags = clu.flags;
+ }
+
+ if ((type == TYPE_UNUSED) &&
+ (hint_femp->count != CNT_UNUSED_HIT)) {
+ hint_femp->count = CNT_UNUSED_HIT;
+ }
+ } else {
+ if ((hint_femp->eidx != -1) &&
+ (hint_femp->count == CNT_UNUSED_HIT)) {
+ /* unused empty group means
+ * an empty group which includes
+ * unused dentry
+ */
+ sdfat_fs_error(sb,
+ "found bogus dentry(%d) "
+ "beyond unused empty group(%d) "
+ "(start_clu : %u, cur_clu : %u)",
+ dentry, hint_femp->eidx, p_dir->dir,
+ clu.dir);
+ return -EIO;
+ }
+
+ num_empty = 0;
+ hint_femp->eidx = -1;
+ }
+
+ if (num_empty >= num_entries) {
+ /* found and invalidate hint_femp */
+ hint_femp->eidx = -1;
+
+ if (fsi->vol_type == EXFAT)
+ return (dentry - (num_entries-1));
+
+ return dentry;
+ }
+ }
+
+ if (IS_CLUS_FREE(p_dir->dir))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ /* contiguous chain: next cluster is simply +1 */
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUS_EOF;
+ } else {
+ if (get_next_clus_safe(sb, &(clu.dir)))
+ return -EIO;
+ }
+ }
+
+ return -ENOSPC;
+} /* end of search_empty_slot */
+
+/* find empty directory entry.
+ * if there isn't any empty slot, expand cluster chain.
+ */
+static s32 find_empty_entry(struct inode *inode, CHAIN_T *p_dir, s32 num_entries)
+{
+ s32 dentry;
+ u32 ret, last_clu;
+ u64 sector;
+ u64 size = 0;
+ CHAIN_T clu;
+ DENTRY_T *ep = NULL;
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
+ HINT_FEMP_T hint_femp;
+
+ hint_femp.eidx = -1;
+
+ ASSERT(-1 <= fid->hint_femp.eidx);
+
+ if (fid->hint_femp.eidx != -1) {
+ memcpy(&hint_femp, &fid->hint_femp, sizeof(HINT_FEMP_T));
+ fid->hint_femp.eidx = -1;
+ }
+
+ /* FAT16 root_dir */
+ if (IS_CLUS_FREE(p_dir->dir))
+ return search_empty_slot(sb, &hint_femp, p_dir, num_entries);
+
+ while ((dentry = search_empty_slot(sb, &hint_femp, p_dir, num_entries)) < 0) {
+ if (dentry == -EIO)
+ break;
+
+ if (fsi->fs_func->check_max_dentries(fid))
+ return -ENOSPC;
+
+ /* we trust p_dir->size regardless of FAT type */
+ if (__find_last_cluster(sb, p_dir, &last_clu))
+ return -EIO;
+
+ /*
+ * Allocate new cluster to this directory
+ */
+ clu.dir = last_clu + 1;
+ clu.size = 0; /* UNUSED */
+ clu.flags = p_dir->flags;
+
+ /* (0) check if there are reserved clusters
+ * (create_dir 의 주석 참고)
+ */
+ if (!IS_CLUS_EOF(fsi->used_clusters) &&
+ ((fsi->used_clusters + fsi->reserved_clusters) >= (fsi->num_clusters - 2)))
+ return -ENOSPC;
+
+ /* (1) allocate a cluster */
+ ret = fsi->fs_func->alloc_cluster(sb, 1, &clu, ALLOC_HOT);
+ if (ret)
+ return ret;
+
+ if (__clear_cluster(inode, clu.dir))
+ return -EIO;
+
+ /* (2) append to the FAT chain */
+ if (clu.flags != p_dir->flags) {
+ /* no-fat-chain bit is disabled,
+ * so fat-chain should be synced with alloc-bmp
+ */
+ chain_cont_cluster(sb, p_dir->dir, p_dir->size);
+ p_dir->flags = 0x01;
+ hint_femp.cur.flags = 0x01;
+ }
+
+ if (clu.flags == 0x01)
+ if (fat_ent_set(sb, last_clu, clu.dir))
+ return -EIO;
+
+ if (hint_femp.eidx == -1) {
+ /* the special case that new dentry
+ * should be allocated from the start of new cluster
+ */
+ hint_femp.eidx = (s32)(p_dir->size <<
+ (fsi->cluster_size_bits - DENTRY_SIZE_BITS));
+ hint_femp.count = fsi->dentries_per_clu;
+
+ hint_femp.cur.dir = clu.dir;
+ hint_femp.cur.size = 0;
+ hint_femp.cur.flags = clu.flags;
+ }
+ hint_femp.cur.size++;
+ p_dir->size++;
+ size = (p_dir->size << fsi->cluster_size_bits);
+
+ /* (3) update the directory entry */
+ if ((fsi->vol_type == EXFAT) && (p_dir->dir != fsi->root_dir)) {
+ ep = get_dentry_in_dir(sb,
+ &(fid->dir), fid->entry+1, §or);
+ if (!ep)
+ return -EIO;
+ fsi->fs_func->set_entry_size(ep, size);
+ fsi->fs_func->set_entry_flag(ep, p_dir->flags);
+ if (dcache_modify(sb, sector))
+ return -EIO;
+
+ if (update_dir_chksum(sb, &(fid->dir), fid->entry))
+ return -EIO;
+ }
+
+ /* directory inode should be updated in here */
+ i_size_write(inode, (loff_t)size);
+ SDFAT_I(inode)->i_size_ondisk += fsi->cluster_size;
+ SDFAT_I(inode)->i_size_aligned += fsi->cluster_size;
+ SDFAT_I(inode)->fid.size = size;
+ SDFAT_I(inode)->fid.flags = p_dir->flags;
+ inode->i_blocks += 1 << (fsi->cluster_size_bits - sb->s_blocksize_bits);
+ }
+
+ return dentry;
+} /* end of find_empty_entry */
+
/* every non-root FAT subdirectory begins with the "." and ".." entries */
#define SDFAT_MIN_SUBDIR (2)
static const char *dot_name[SDFAT_MIN_SUBDIR] = { DOS_CUR_DIR_NAME, DOS_PAR_DIR_NAME };
+
/*
 * Count directory entries matching @type in @p_dir.
 * When @dotcnt is non-NULL (FAT volumes only), additionally count how many
 * of the first two slots hold the DOS "." / ".." names; those can only
 * live in the first cluster, so check_dot is cleared after it.
 * Returns the count, or -EIO on a read/chain error.
 */
static s32 __count_dos_name_entries(struct super_block *sb, CHAIN_T *p_dir, u32 type, u32 *dotcnt)
{
	s32 i, count = 0, check_dot = 0;
	s32 dentries_per_clu;
	u32 entry_type;
	CHAIN_T clu;
	DENTRY_T *ep;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	if (IS_CLUS_FREE(p_dir->dir)) /* FAT16 root_dir */
		dentries_per_clu = fsi->dentries_in_root;
	else
		dentries_per_clu = fsi->dentries_per_clu;

	clu.dir = p_dir->dir;
	clu.size = p_dir->size;
	clu.flags = p_dir->flags;

	if (dotcnt) {
		*dotcnt = 0;
		/* exFAT has no "."/".." entries at all */
		if (fsi->vol_type != EXFAT)
			check_dot = 1;
	}

	while (!IS_CLUS_EOF(clu.dir)) {
		for (i = 0; i < dentries_per_clu; i++) {
			ep = get_dentry_in_dir(sb, &clu, i, NULL);
			if (!ep)
				return -EIO;

			entry_type = fsi->fs_func->get_entry_type(ep);

			/* UNUSED marks the end of the directory */
			if (entry_type == TYPE_UNUSED)
				return count;

			if (!(type & TYPE_CRITICAL_PRI) && !(type & TYPE_BENIGN_PRI))
				continue;

			if ((type != TYPE_ALL) && (type != entry_type))
				continue;

			count++;
			if (check_dot && (i < SDFAT_MIN_SUBDIR)) {
				BUG_ON(fsi->vol_type == EXFAT);
				/* 11 is DOS_NAME_LENGTH */
				if (!strncmp(ep->dummy, dot_name[i], 11))
					(*dotcnt)++;
			}
		}

		/* FAT16 root_dir */
		if (IS_CLUS_FREE(p_dir->dir))
			break;

		if (clu.flags == 0x03) {
			/* no-fat-chain : clusters are contiguous */
			if ((--clu.size) > 0)
				clu.dir++;
			else
				clu.dir = CLUS_EOF;
		} else {
			if (get_next_clus_safe(sb, &(clu.dir)))
				return -EIO;
		}

		/* "."/".." only exist in the first cluster */
		check_dot = 0;
	}

	return count;
}
+
/*
 * Check whether directory @p_dir contains any live file or subdirectory.
 * Returns 0 when empty, -ENOTEMPTY otherwise, -EIO on a read/chain error.
 * On FAT volumes the first two entries ("." and "..") of a subdirectory
 * are tolerated, hence the "++count > 2" test; exFAT and the root dir
 * fail on the first hit.
 */
s32 check_dir_empty(struct super_block *sb, CHAIN_T *p_dir)
{
	s32 i, count = 0;
	s32 dentries_per_clu;
	u32 type;
	CHAIN_T clu;
	DENTRY_T *ep;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	if (IS_CLUS_FREE(p_dir->dir)) /* FAT16 root_dir */
		dentries_per_clu = fsi->dentries_in_root;
	else
		dentries_per_clu = fsi->dentries_per_clu;

	clu.dir = p_dir->dir;
	clu.size = p_dir->size;
	clu.flags = p_dir->flags;

	while (!IS_CLUS_EOF(clu.dir)) {
		for (i = 0; i < dentries_per_clu; i++) {
			ep = get_dentry_in_dir(sb, &clu, i, NULL);
			if (!ep)
				return -EIO;

			type = fsi->fs_func->get_entry_type(ep);

			/* UNUSED marks the end of the directory : empty */
			if (type == TYPE_UNUSED)
				return 0;

			if ((type != TYPE_FILE) && (type != TYPE_DIR))
				continue;

			/* FAT16 root_dir */
			if (IS_CLUS_FREE(p_dir->dir))
				return -ENOTEMPTY;

			if (fsi->vol_type == EXFAT)
				return -ENOTEMPTY;

			if ((p_dir->dir == fsi->root_dir) || (++count > 2))
				return -ENOTEMPTY;
		}

		/* FAT16 root_dir */
		if (IS_CLUS_FREE(p_dir->dir))
			return -ENOTEMPTY;

		if (clu.flags == 0x03) {
			/* no-fat-chain : clusters are contiguous */
			if ((--clu.size) > 0)
				clu.dir++;
			else
				clu.dir = CLUS_EOF;
		} else {
			if (get_next_clus_safe(sb, &(clu.dir)))
				return -EIO;
		}
	}

	return 0;
}
+
+/*
+ * Name Conversion Functions
+ */
/* Decide whether a lossy short-name (SFN) conversion must be invalidated
 * (the SFN cleared) during lookup; @x is the nls lossy-flag bitmask.
 */
#ifdef CONFIG_SDFAT_ALLOW_LOOKUP_LOSSY_SFN
	/* over name length only */
#define NEED_INVALIDATE_SFN(x) ((x) & NLS_NAME_OVERLEN)
#else
	/* all lossy case */
#define NEED_INVALIDATE_SFN(x) (x)
#endif
+
+/* NOTE :
+ * We should keep shortname code compatible with v1.0.15 or lower
+ * So, we try to check ext-only-name at create-mode only.
+ *
+ * i.e. '.mtp' ->
+ * v1.0.15 : ' MTP' with name_case, 0x10
+ * v1.1.0 : 'MT????~?' with name_case, 0x00 and longname.
+ */
/*
 * For a create of an extension-only name (leading '.', e.g. ".mtp"),
 * rewrite the generated DOS short name and force the lossy flag so a
 * numeric-tail SFN is generated instead (see the NOTE above about
 * v1.0.15 compatibility).  No-op for lookups or already-lossy names.
 */
static inline void preprocess_ext_only_sfn(s32 lookup, u16 first_char, DOS_NAME_T *p_dosname, s32 *lossy)
{
#ifdef CONFIG_SDFAT_RESTRICT_EXT_ONLY_SFN
	int i;
	/* check ext-only-name at create-mode */
	if (*lossy || lookup || (first_char != (u16)'.'))
		return;

	/* name_case 0xFF : name not representable by case flags alone
	 * — presumably forces LFN entries; TODO confirm semantics
	 */
	p_dosname->name_case = 0xFF;

	/* move ext-name to base-name */
	for (i = 0; i < 3; i++) {
		p_dosname->name[i] = p_dosname->name[8+i];
		if (p_dosname->name[i] == ' ')
			p_dosname->name[i] = '_';
	}

	/* fill remained space with '_' */
	for (i = 3; i < 8; i++)
		p_dosname->name[i] = '_';

	/* eliminate ext-name */
	for (i = 8; i < 11; i++)
		p_dosname->name[i] = ' ';

	*lossy = NLS_NAME_LOSSY;
#endif /* CONFIG_SDFAT_RESTRICT_EXT_ONLY_SFN */
}
+
+/* input : dir, uni_name
+ * output : num_of_entry, dos_name(format : aaaaaa~1.bbb)
+ */
/* input : dir, uni_name
 * output : num_of_entry, dos_name(format : aaaaaa~1.bbb)
 *
 * Compute how many dentry slots @p_uniname needs and, on FAT volumes,
 * the matching 8.3 DOS name.  @lookup selects lookup semantics (lossy
 * conversions tolerated) versus create semantics (a numeric-tail DOS
 * name is generated for lossy conversions).
 * Returns 0, -EINVAL (unrepresentable or reserved name), or an error
 * from DOS-name generation.
 */
static s32 get_num_entries_and_dos_name(struct super_block *sb, CHAIN_T *p_dir,
					UNI_NAME_T *p_uniname, s32 *entries,
					DOS_NAME_T *p_dosname, s32 lookup)
{
	s32 ret, num_entries, lossy = NLS_NAME_NO_LOSSY;
	s8 **r;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	/* Init null char. */
	p_dosname->name[0] = '\0';

	num_entries = fsi->fs_func->calc_num_entries(p_uniname);
	if (num_entries == 0)
		return -EINVAL;

	/* exFAT has no DOS (8.3) names */
	if (fsi->vol_type == EXFAT)
		goto out;

	nls_uni16s_to_sfn(sb, p_uniname, p_dosname, &lossy);

	preprocess_ext_only_sfn(lookup, p_uniname->name[0], p_dosname, &lossy);

	if (!lossy) {
		/* reject names colliding with the reserved-name table */
		for (r = reserved_names; *r; r++) {
			if (!strncmp((void *) p_dosname->name, *r, 8))
				return -EINVAL;
		}

		/* a cleanly representable SFN needs no extension entries */
		if (p_dosname->name_case != 0xFF)
			num_entries = 1;
	} else if (!lookup) {
		/* create new dos name */
		ret = fat_generate_dos_name_new(sb, p_dir, p_dosname,
						num_entries);
		if (ret)
			return ret;

	} else if (NEED_INVALIDATE_SFN(lossy)) {
		/* FIXME : We should check num_entries */
		p_dosname->name[0] = '\0';
	}

	if (num_entries > 1)
		p_dosname->name_case = 0x0;
out:
	*entries = num_entries;
	return 0;
} /* end of get_num_entries_and_dos_name */
+
+void get_uniname_from_dos_entry(struct super_block *sb, DOS_DENTRY_T *ep, UNI_NAME_T *p_uniname, u8 mode)
+{
+ DOS_NAME_T dos_name;
+
+ if (mode == 0x0)
+ dos_name.name_case = 0x0;
+ else
+ dos_name.name_case = ep->lcase;
+
+ memcpy(dos_name.name, ep->name, DOS_NAME_LENGTH);
+ nls_sfn_to_uni16s(sb, &dos_name, p_uniname);
+} /* end of get_uniname_from_dos_entry */
+
/* returns the length of a struct qstr, ignoring trailing dots */
static inline unsigned int __striptail_len(unsigned int len, const char *name)
{
	unsigned int n;

	for (n = len; n > 0 && name[n - 1] == '.'; n--)
		;
	return n;
}
+
+/*
+ * Name Resolution Functions :
+ * Zero if it was successful; otherwise nonzero.
+ */
/*
 * Convert the VFS name @path into a UTF-16 name (@p_uniname) and fill
 * @p_dir with the cluster chain of the parent directory @inode.
 * @lookup tolerates lossy character-set conversion (for finding names
 * stored under looser rules); creates reject lossy names with -EINVAL.
 * Returns 0, -ENOENT (empty after stripping dots), -ENAMETOOLONG,
 * -EINVAL, or an nls conversion error.
 */
static s32 __resolve_path(struct inode *inode, const u8 *path, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, int lookup)
{
	s32 namelen;
	s32 lossy = NLS_NAME_NO_LOSSY;
	struct super_block *sb = inode->i_sb;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	FILE_ID_T *fid = &(SDFAT_I(inode)->fid);

	/* DOT and DOTDOT are handled by VFS layer */

	/* strip all trailing spaces */
	/* DO NOTHING : Is needed? */

	/* strip all trailing periods */
	namelen = __striptail_len(strlen(path), path);
	if (!namelen)
		return -ENOENT;

	/* the limitation of linux? */
	if (strlen(path) > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
		return -ENAMETOOLONG;

	/*
	 * strip all leading spaces :
	 * "MS windows 7" supports leading spaces.
	 * So we should skip this preprocessing for compatibility.
	 */

	/* file name conversion :
	 * If lookup case, we allow bad-name for compatibility.
	 */
	namelen = nls_vfsname_to_uni16s(sb, path, namelen, p_uniname, &lossy);
	if (namelen < 0)
		return namelen; /* return error value */

	if ((lossy && !lookup) || !namelen)
		return -EINVAL;

	/* the cached fid size must agree with the VFS inode size */
	sdfat_debug_bug_on(fid->size != i_size_read(inode));
//	fid->size = i_size_read(inode);

	p_dir->dir = fid->start_clu;
	p_dir->size = (u32)(fid->size >> fsi->cluster_size_bits);
	p_dir->flags = fid->flags;

	return 0;
}
+
/* resolve @path for create/rename : lossy name conversion is rejected */
static inline s32 resolve_path(struct inode *inode, const u8 *path, CHAIN_T *dir, UNI_NAME_T *uni)
{
	return __resolve_path(inode, path, dir, uni, 0);
}
+
/* resolve @path for lookup : lossy name conversion is tolerated */
static inline s32 resolve_path_for_lookup(struct inode *inode, const u8 *path, CHAIN_T *dir, UNI_NAME_T *uni)
{
	return __resolve_path(inode, path, dir, uni, 1);
}
+
+static s32 create_dir(struct inode *inode, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, FILE_ID_T *fid)
+{
+ s32 dentry, num_entries;
+ u64 ret;
+ u64 size;
+ CHAIN_T clu;
+ DOS_NAME_T dos_name, dot_name;
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries, &dos_name, 0);
+ if (ret)
+ return ret;
+
+ /* find_empty_entry must be called before alloc_cluster */
+ dentry = find_empty_entry(inode, p_dir, num_entries);
+ if (dentry < 0)
+ return dentry; /* -EIO or -ENOSPC */
+
+ clu.dir = CLUS_EOF;
+ clu.size = 0;
+ clu.flags = (fsi->vol_type == EXFAT) ? 0x03 : 0x01;
+
+ /* (0) Check if there are reserved clusters up to max. */
+ if ((fsi->used_clusters + fsi->reserved_clusters) >= (fsi->num_clusters - CLUS_BASE))
+ return -ENOSPC;
+
+ /* (1) allocate a cluster */
+ ret = fsi->fs_func->alloc_cluster(sb, 1, &clu, ALLOC_HOT);
+ if (ret)
+ return ret;
+
+ ret = __clear_cluster(inode, clu.dir);
+ if (ret)
+ return ret;
+
+ size = fsi->cluster_size;
+ if (fsi->vol_type != EXFAT) {
+ /* initialize the . and .. entry
+ * Information for . points to itself
+ * Information for .. points to parent dir
+ */
+
+ dot_name.name_case = 0x0;
+ memcpy(dot_name.name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH);
+
+ ret = fsi->fs_func->init_dir_entry(sb, &clu, 0, TYPE_DIR, clu.dir, 0);
+ if (ret)
+ return ret;
+
+ ret = fsi->fs_func->init_ext_entry(sb, &clu, 0, 1, NULL, &dot_name);
+ if (ret)
+ return ret;
+
+ memcpy(dot_name.name, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH);
+
+ if (p_dir->dir == fsi->root_dir)
+ ret = fsi->fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR, CLUS_FREE, 0);
+ else
+ ret = fsi->fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR, p_dir->dir, 0);
+
+ if (ret)
+ return ret;
+
+ ret = fsi->fs_func->init_ext_entry(sb, &clu, 1, 1, NULL, &dot_name);
+ if (ret)
+ return ret;
+ }
+
+ /* (2) update the directory entry */
+ /* make sub-dir entry in parent directory */
+ ret = fsi->fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir, size);
+ if (ret)
+ return ret;
+
+ ret = fsi->fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname, &dos_name);
+ if (ret)
+ return ret;
+
+ fid->dir.dir = p_dir->dir;
+ fid->dir.size = p_dir->size;
+ fid->dir.flags = p_dir->flags;
+ fid->entry = dentry;
+
+ fid->attr = ATTR_SUBDIR;
+ fid->flags = (fsi->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->size = size;
+ fid->start_clu = clu.dir;
+
+ fid->type = TYPE_DIR;
+ fid->rwoffset = 0;
+ fid->hint_bmap.off = CLUS_EOF;
+
+ /* hint_stat will be used if this is directory. */
+ fid->version = 0;
+ fid->hint_stat.eidx = 0;
+ fid->hint_stat.clu = fid->start_clu;
+ fid->hint_femp.eidx = -1;
+
+ return 0;
+} /* end of create_dir */
+
/*
 * Create a new regular file named @p_uniname in @p_dir with attribute
 * bits @mode.  No data cluster is allocated yet (start_clu stays EOF
 * and the on-disk first-cluster field is 0).  Fills @fid on success.
 * Returns 0 or a negative error code.
 */
static s32 create_file(struct inode *inode, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, u8 mode, FILE_ID_T *fid)
{
	s32 ret, dentry, num_entries;
	DOS_NAME_T dos_name;
	struct super_block *sb = inode->i_sb;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries, &dos_name, 0);
	if (ret)
		return ret;

	/* find_empty_entry must be called before alloc_cluster() */
	dentry = find_empty_entry(inode, p_dir, num_entries);
	if (dentry < 0)
		return dentry; /* -EIO or -ENOSPC */

	/* (1) update the directory entry */
	/* fill the dos name directory entry information of the created file.
	 * the first cluster is not determined yet. (0)
	 */
	ret = fsi->fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode, CLUS_FREE, 0);
	if (ret)
		return ret;

	ret = fsi->fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname, &dos_name);
	if (ret)
		return ret;

	/* (2) describe the new file in the returned fid */
	fid->dir.dir = p_dir->dir;
	fid->dir.size = p_dir->size;
	fid->dir.flags = p_dir->flags;
	fid->entry = dentry;

	fid->attr = ATTR_ARCHIVE | mode;
	fid->flags = (fsi->vol_type == EXFAT) ? 0x03 : 0x01;
	fid->size = 0;
	fid->start_clu = CLUS_EOF;

	fid->type = TYPE_FILE;
	fid->rwoffset = 0;
	fid->hint_bmap.off = CLUS_EOF;

	/* hint_stat will be used if this is directory. */
	fid->version = 0;
	fid->hint_stat.eidx = 0;
	fid->hint_stat.clu = fid->start_clu;
	fid->hint_femp.eidx = -1;

	return 0;
} /* end of create_file */
+
/*
 * Delete the dentry at @entry in @p_dir together with its extension
 * entries.  Returns 0 on success or a negative error code.
 */
static s32 remove_file(struct inode *inode, CHAIN_T *p_dir, s32 entry)
{
	s32 num_entries;
	u64 sector;
	DENTRY_T *ep;
	struct super_block *sb = inode->i_sb;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	ep = get_dentry_in_dir(sb, p_dir, entry, &sector);
	if (!ep)
		return -EIO;

	dcache_lock(sb, sector);

	/* dcache_lock() before call count_ext_entries() */
	num_entries = fsi->fs_func->count_ext_entries(sb, p_dir, entry, ep);
	if (num_entries < 0) {
		dcache_unlock(sb, sector);
		return -EIO;
	}
	/* +1 for the main dentry itself */
	num_entries++;

	dcache_unlock(sb, sector);

	/* (1) update the directory entry */
	return fsi->fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries);
} /* end of remove_file */
+
/*
 * Rename the dentry at @oldentry in @p_dir to @p_uniname (same parent).
 * If the new name needs more slots than the old one, a fresh run is
 * allocated, the main (and exFAT stream) entries copied over, and the
 * old run deleted; otherwise the entry set is rewritten in place.
 * @fid tracks the (possibly moved) entry and gains ATTR_ARCHIVE for
 * regular files.  Returns 0 or a negative error code.
 */
static s32 rename_file(struct inode *inode, CHAIN_T *p_dir, s32 oldentry, UNI_NAME_T *p_uniname, FILE_ID_T *fid)
{
	s32 ret, newentry = -1, num_old_entries, num_new_entries;
	u64 sector_old, sector_new;
	DOS_NAME_T dos_name;
	DENTRY_T *epold, *epnew;
	struct super_block *sb = inode->i_sb;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	epold = get_dentry_in_dir(sb, p_dir, oldentry, &sector_old);
	if (!epold)
		return -EIO;

	dcache_lock(sb, sector_old);

	/* dcache_lock() before call count_ext_entries() */
	num_old_entries = fsi->fs_func->count_ext_entries(sb, p_dir, oldentry, epold);
	if (num_old_entries < 0) {
		dcache_unlock(sb, sector_old);
		return -EIO;
	}
	/* +1 for the main dentry itself */
	num_old_entries++;

	ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_new_entries, &dos_name, 0);
	if (ret) {
		dcache_unlock(sb, sector_old);
		return ret;
	}

	if (num_old_entries < num_new_entries) {
		/* the new name needs a larger run of slots */
		newentry = find_empty_entry(inode, p_dir, num_new_entries);
		if (newentry < 0) {
			dcache_unlock(sb, sector_old);
			return newentry; /* -EIO or -ENOSPC */
		}

		epnew = get_dentry_in_dir(sb, p_dir, newentry, &sector_new);
		if (!epnew) {
			dcache_unlock(sb, sector_old);
			return -EIO;
		}

		memcpy((void *) epnew, (void *) epold, DENTRY_SIZE);
		if (fsi->fs_func->get_entry_type(epnew) == TYPE_FILE) {
			fsi->fs_func->set_entry_attr(epnew, fsi->fs_func->get_entry_attr(epnew) | ATTR_ARCHIVE);
			fid->attr |= ATTR_ARCHIVE;
		}
		dcache_modify(sb, sector_new);
		dcache_unlock(sb, sector_old);

		if (fsi->vol_type == EXFAT) {
			/* also copy the second (stream-ext) entry */
			epold = get_dentry_in_dir(sb, p_dir, oldentry+1, &sector_old);
			dcache_lock(sb, sector_old);
			epnew = get_dentry_in_dir(sb, p_dir, newentry+1, &sector_new);

			if (!epold || !epnew) {
				dcache_unlock(sb, sector_old);
				return -EIO;
			}

			memcpy((void *) epnew, (void *) epold, DENTRY_SIZE);
			dcache_modify(sb, sector_new);
			dcache_unlock(sb, sector_old);
		}

		ret = fsi->fs_func->init_ext_entry(sb, p_dir, newentry, num_new_entries, p_uniname, &dos_name);
		if (ret)
			return ret;

		fsi->fs_func->delete_dir_entry(sb, p_dir, oldentry, 0, num_old_entries);
		fid->entry = newentry;
	} else {
		/* new name fits in the old run : rewrite in place */
		if (fsi->fs_func->get_entry_type(epold) == TYPE_FILE) {
			fsi->fs_func->set_entry_attr(epold, fsi->fs_func->get_entry_attr(epold) | ATTR_ARCHIVE);
			fid->attr |= ATTR_ARCHIVE;
		}
		dcache_modify(sb, sector_old);
		dcache_unlock(sb, sector_old);

		ret = fsi->fs_func->init_ext_entry(sb, p_dir, oldentry, num_new_entries, p_uniname, &dos_name);
		if (ret)
			return ret;

		/* drop the now-surplus trailing extension entries */
		fsi->fs_func->delete_dir_entry(sb, p_dir, oldentry, num_new_entries, num_old_entries);
	}

	return 0;
} /* end of rename_file */
+
/*
 * Move the dentry at @oldentry in @p_olddir to @p_newdir under the name
 * @p_uniname (cross-directory rename).  Copies the main (and exFAT
 * stream) entries into the new directory, fixes the ".." back-pointer
 * of a moved FAT subdirectory, then deletes the old run.  Updates @fid.
 * Returns 0 or a negative error code.
 */
static s32 move_file(struct inode *inode, CHAIN_T *p_olddir, s32 oldentry,
		CHAIN_T *p_newdir, UNI_NAME_T *p_uniname, FILE_ID_T *fid)
{
	s32 ret, newentry, num_new_entries, num_old_entries;
	u64 sector_mov, sector_new;
	CHAIN_T clu;
	DOS_NAME_T dos_name;
	DENTRY_T *epmov, *epnew;
	struct super_block *sb = inode->i_sb;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	epmov = get_dentry_in_dir(sb, p_olddir, oldentry, &sector_mov);
	if (!epmov)
		return -EIO;

	/* check if the source and target directory is the same */
	if (fsi->fs_func->get_entry_type(epmov) == TYPE_DIR &&
		fsi->fs_func->get_entry_clu0(epmov) == p_newdir->dir)
		return -EINVAL;

	dcache_lock(sb, sector_mov);

	/* dcache_lock() before call count_ext_entries() */
	num_old_entries = fsi->fs_func->count_ext_entries(sb, p_olddir, oldentry, epmov);
	if (num_old_entries < 0) {
		dcache_unlock(sb, sector_mov);
		return -EIO;
	}
	/* +1 for the main dentry itself */
	num_old_entries++;

	ret = get_num_entries_and_dos_name(sb, p_newdir, p_uniname, &num_new_entries, &dos_name, 0);
	if (ret) {
		dcache_unlock(sb, sector_mov);
		return ret;
	}

	newentry = find_empty_entry(inode, p_newdir, num_new_entries);
	if (newentry < 0) {
		dcache_unlock(sb, sector_mov);
		return newentry; /* -EIO or -ENOSPC */
	}

	epnew = get_dentry_in_dir(sb, p_newdir, newentry, &sector_new);
	if (!epnew) {
		dcache_unlock(sb, sector_mov);
		return -EIO;
	}

	memcpy((void *) epnew, (void *) epmov, DENTRY_SIZE);
	if (fsi->fs_func->get_entry_type(epnew) == TYPE_FILE) {
		fsi->fs_func->set_entry_attr(epnew, fsi->fs_func->get_entry_attr(epnew) | ATTR_ARCHIVE);
		fid->attr |= ATTR_ARCHIVE;
	}
	dcache_modify(sb, sector_new);
	dcache_unlock(sb, sector_mov);

	if (fsi->vol_type == EXFAT) {
		/* also copy the second (stream-ext) entry */
		epmov = get_dentry_in_dir(sb, p_olddir, oldentry+1, &sector_mov);
		dcache_lock(sb, sector_mov);
		epnew = get_dentry_in_dir(sb, p_newdir, newentry+1, &sector_new);
		if (!epmov || !epnew) {
			dcache_unlock(sb, sector_mov);
			return -EIO;
		}

		memcpy((void *) epnew, (void *) epmov, DENTRY_SIZE);
		dcache_modify(sb, sector_new);
		dcache_unlock(sb, sector_mov);
	} else if (fsi->fs_func->get_entry_type(epnew) == TYPE_DIR) {
		/* change ".." pointer to new parent dir */
		clu.dir = fsi->fs_func->get_entry_clu0(epnew);
		clu.flags = 0x01;

		epnew = get_dentry_in_dir(sb, &clu, 1, &sector_new);
		if (!epnew)
			return -EIO;

		/* ".." of a root child stores cluster 0 (CLUS_FREE) */
		if (p_newdir->dir == fsi->root_dir)
			fsi->fs_func->set_entry_clu0(epnew, CLUS_FREE);
		else
			fsi->fs_func->set_entry_clu0(epnew, p_newdir->dir);
		dcache_modify(sb, sector_new);
	}

	ret = fsi->fs_func->init_ext_entry(sb, p_newdir, newentry, num_new_entries, p_uniname, &dos_name);
	if (ret)
		return ret;

	fsi->fs_func->delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);

	fid->dir.dir = p_newdir->dir;
	fid->dir.size = p_newdir->size;
	fid->dir.flags = p_newdir->flags;

	fid->entry = newentry;

	return 0;
} /* end of move_file */
+
+
+/*======================================================================*/
+/* Global Function Definitions */
+/*======================================================================*/
+/* roll back to the initial state of the file system */
+s32 fscore_init(void)
+{
+ s32 ret;
+
+ ret = check_type_size();
+ if (ret)
+ return ret;
+
+ return extent_cache_init();
+}
+
+/* make free all memory-alloced global buffers */
+s32 fscore_shutdown(void)
+{
+ extent_cache_shutdown();
+ return 0;
+}
+
+/* check device is ejected */
+s32 fscore_check_bdi_valid(struct super_block *sb)
+{
+ return bdev_check_bdi_valid(sb);
+}
+
+static bool is_exfat(pbr_t *pbr)
+{
+ int i = 53;
+
+ do {
+ if (pbr->bpb.f64.res_zero[i-1])
+ break;
+ } while (--i);
+ return i ? false : true;
+}
+
+static bool is_fat32(pbr_t *pbr)
+{
+ if (le16_to_cpu(pbr->bpb.fat.num_fat_sectors))
+ return false;
+ return true;
+}
+
/*
 * Validate the volume's logical sector size from the PBR and, if it is
 * larger than the current sb blocksize, re-set the blocksize and re-read
 * sector 0.  On success *prev_bh holds the (possibly re-read) buffer and
 * the PBR pointer is returned; on failure NULL is returned (and when the
 * buffer was re-read, *prev_bh may have been released and set to NULL —
 * callers must handle both states).
 */
inline pbr_t *read_pbr_with_logical_sector(struct super_block *sb, struct buffer_head **prev_bh)
{
	pbr_t *p_pbr = (pbr_t *) (*prev_bh)->b_data;
	u16 logical_sect = 0;

	if (is_exfat(p_pbr))
		logical_sect = 1 << p_pbr->bsx.f64.sect_size_bits;
	else
		logical_sect = get_unaligned_le16(&p_pbr->bpb.fat.sect_size);

	/* is x a power of 2?
	 * (x) != 0 && (((x) & ((x) - 1)) == 0)
	 */
	if (!is_power_of_2(logical_sect)
			|| (logical_sect < 512)
			|| (logical_sect > 4096)) {
		sdfat_log_msg(sb, KERN_ERR, "bogus logical sector size %u",
				logical_sect);
		return NULL;
	}

	if (logical_sect < sb->s_blocksize) {
		/* device cannot address units smaller than its block size */
		sdfat_log_msg(sb, KERN_ERR,
			"logical sector size too small for device"
			" (logical sector size = %u)", logical_sect);
		return NULL;
	}

	if (logical_sect > sb->s_blocksize) {
		struct buffer_head *bh = NULL;

		/* old buffer was read at the wrong block size : re-read */
		__brelse(*prev_bh);
		*prev_bh = NULL;

		if (!sb_set_blocksize(sb, logical_sect)) {
			sdfat_log_msg(sb, KERN_ERR,
				"unable to set blocksize %u", logical_sect);
			return NULL;
		}
		bh = sb_bread(sb, 0);
		if (!bh) {
			sdfat_log_msg(sb, KERN_ERR,
				"unable to read boot sector "
				"(logical sector size = %lu)", sb->s_blocksize);
			return NULL;
		}

		*prev_bh = bh;
		p_pbr = (pbr_t *) bh->b_data;
	}

	sdfat_log_msg(sb, KERN_INFO,
		"set logical sector size : %lu", sb->s_blocksize);

	return p_pbr;
}
+
+/* mount the file system volume */
+s32 fscore_mount(struct super_block *sb)
+{
+ s32 ret;
+ pbr_t *p_pbr;
+ struct buffer_head *tmp_bh = NULL;
+ struct gendisk *disk = sb->s_bdev->bd_disk;
+ struct hd_struct *part = sb->s_bdev->bd_part;
+ struct sdfat_mount_options *opts = &(SDFAT_SB(sb)->options);
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ /* initialize previous I/O error */
+ fsi->prev_eio = 0;
+
+ /* open the block device */
+ if (bdev_open_dev(sb))
+ return -EIO;
+
+ /* set block size to read super block */
+ sb_min_blocksize(sb, 512);
+
+ /* read boot sector */
+ ret = read_sect(sb, 0, &tmp_bh, 1);
+ if (ret) {
+ sdfat_log_msg(sb, KERN_ERR, "unable to read boot sector");
+ ret = -EIO;
+ goto bd_close;
+ }
+
+ /* PRB is read */
+ p_pbr = (pbr_t *) tmp_bh->b_data;
+
+ /* check the validity of PBR */
+ if (le16_to_cpu((p_pbr->signature)) != PBR_SIGNATURE) {
+ sdfat_log_msg(sb, KERN_ERR, "invalid boot record signature");
+ brelse(tmp_bh);
+ ret = -EINVAL;
+ goto bd_close;
+ }
+
+ /* check logical sector size */
+ p_pbr = read_pbr_with_logical_sector(sb, &tmp_bh);
+ if (!p_pbr) {
+ brelse(tmp_bh);
+ ret = -EIO;
+ goto bd_close;
+ }
+
+ /* fill fs_struct */
+ if (is_exfat(p_pbr)) {
+ if (opts->fs_type && opts->fs_type != FS_TYPE_EXFAT) {
+ sdfat_log_msg(sb, KERN_ERR,
+ "not specified filesystem type "
+ "(media:exfat, opts:%s)",
+ FS_TYPE_STR[opts->fs_type]);
+ ret = -EINVAL;
+ goto free_bh;
+ }
+ /* set maximum file size for exFAT */
+ sb->s_maxbytes = 0x7fffffffffffffffLL;
+ opts->improved_allocation = 0;
+ opts->defrag = 0;
+ ret = mount_exfat(sb, p_pbr);
+ } else {
+ if (opts->fs_type && opts->fs_type != FS_TYPE_VFAT) {
+ sdfat_log_msg(sb, KERN_ERR,
+ "not specified filesystem type "
+ "(media:vfat, opts:%s)",
+ FS_TYPE_STR[opts->fs_type]);
+ ret = -EINVAL;
+ goto free_bh;
+ }
+ /* set maximum file size for FAT */
+ sb->s_maxbytes = 0xffffffff;
+
+ if (is_fat32(p_pbr)) {
+ ret = mount_fat32(sb, p_pbr);
+ } else {
+ opts->improved_allocation = 0;
+ opts->defrag = 0;
+ ret = mount_fat16(sb, p_pbr);
+ }
+ }
+free_bh:
+ brelse(tmp_bh);
+ if (ret) {
+ sdfat_log_msg(sb, KERN_ERR, "failed to mount fs-core");
+ goto bd_close;
+ }
+
+ /* warn misaligned data data start sector must be a multiple of clu_size */
+ sdfat_log_msg(sb, KERN_INFO,
+ "detected volume info : %s "
+ "(%04hX-%04hX, bps : %lu, spc : %u, data start : %llu, %s)",
+ sdfat_get_vol_type_str(fsi->vol_type),
+ (fsi->vol_id >> 16) & 0xffff, fsi->vol_id & 0xffff,
+ sb->s_blocksize, fsi->sect_per_clus, fsi->data_start_sector,
+ (fsi->data_start_sector & (fsi->sect_per_clus - 1)) ?
+ "misaligned" : "aligned");
+
+ sdfat_log_msg(sb, KERN_INFO,
+ "detected volume size : %llu KB (disk : %llu KB, "
+ "part : %llu KB)",
+ (fsi->num_sectors * (sb->s_blocksize >> SECTOR_SIZE_BITS)) >> 1,
+ disk ? (u64)((disk->part0.nr_sects) >> 1) : 0,
+ part ? (u64)((part->nr_sects) >> 1) : 0);
+
+ ret = load_upcase_table(sb);
+ if (ret) {
+ sdfat_log_msg(sb, KERN_ERR, "failed to load upcase table");
+ goto bd_close;
+ }
+
+ if (fsi->vol_type != EXFAT)
+ goto update_used_clus;
+
+ /* allocate-bitmap is only for exFAT */
+ ret = load_alloc_bmp(sb);
+ if (ret) {
+ sdfat_log_msg(sb, KERN_ERR, "failed to load alloc-bitmap");
+ goto free_upcase;
+ }
+
+update_used_clus:
+ if (fsi->used_clusters == (u32) ~0) {
+ ret = fsi->fs_func->count_used_clusters(sb, &fsi->used_clusters);
+ if (ret) {
+ sdfat_log_msg(sb, KERN_ERR, "failed to scan clusters");
+ goto free_alloc_bmp;
+ }
+ }
+
+ return 0;
+free_alloc_bmp:
+ if (fsi->vol_type == EXFAT)
+ free_alloc_bmp(sb);
+free_upcase:
+ free_upcase_table(sb);
+bd_close:
+ bdev_close_dev(sb);
+ return ret;
+} /* end of fscore_mount */
+
+/* umount the file system volume */
+s32 fscore_umount(struct super_block *sb)
+{
+ s32 ret = 0;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ if (fs_sync(sb, 0))
+ ret = -EIO;
+
+ if (fs_set_vol_flags(sb, VOL_CLEAN))
+ ret = -EIO;
+
+ free_upcase_table(sb);
+
+ if (fsi->vol_type == EXFAT)
+ free_alloc_bmp(sb);
+
+ if (fcache_release_all(sb))
+ ret = -EIO;
+
+ if (dcache_release_all(sb))
+ ret = -EIO;
+
+ amap_destroy(sb);
+
+ if (fsi->prev_eio)
+ ret = -EIO;
+ /* close the block device */
+ bdev_close_dev(sb);
+ return ret;
+}
+
+/* get the information of a file system volume */
+s32 fscore_statfs(struct super_block *sb, VOL_INFO_T *info)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ if (fsi->used_clusters == (u32) ~0) {
+ if (fsi->fs_func->count_used_clusters(sb, &fsi->used_clusters))
+ return -EIO;
+ }
+
+ info->FatType = fsi->vol_type;
+ info->ClusterSize = fsi->cluster_size;
+ info->NumClusters = fsi->num_clusters - 2; /* clu 0 & 1 */
+ info->UsedClusters = fsi->used_clusters + fsi->reserved_clusters;
+ info->FreeClusters = info->NumClusters - info->UsedClusters;
+
+ return 0;
+}
+
+/* synchronize all file system volumes */
+s32 fscore_sync_fs(struct super_block *sb, s32 do_sync)
+{
+ /* synchronize the file system */
+ if (fs_sync(sb, do_sync))
+ return -EIO;
+
+ if (fs_set_vol_flags(sb, VOL_CLEAN))
+ return -EIO;
+
+ return 0;
+}
+
+/* stat allocation unit of a file system volume */
+u32 fscore_get_au_stat(struct super_block *sb, s32 mode)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ if (fsi->fs_func->get_au_stat)
+ return fsi->fs_func->get_au_stat(sb, mode);
+
+ /* No error, just returns 0 */
+ return 0;
+}
+
+
+/*----------------------------------------------------------------------*/
+/* File Operation Functions */
+/*----------------------------------------------------------------------*/
+/* lookup a file */
+s32 fscore_lookup(struct inode *inode, u8 *path, FILE_ID_T *fid)
+{
+ s32 ret, dentry, num_entries;
+ CHAIN_T dir;
+ UNI_NAME_T uni_name;
+ DOS_NAME_T dos_name;
+ DENTRY_T *ep, *ep2;
+ ENTRY_SET_CACHE_T *es = NULL;
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ FILE_ID_T *dir_fid = &(SDFAT_I(inode)->fid);
+
+ TMSG("%s entered\n", __func__);
+
+ /* check the validity of directory name in the given pathname */
+ ret = resolve_path_for_lookup(inode, path, &dir, &uni_name);
+ if (ret)
+ return ret;
+
+ ret = get_num_entries_and_dos_name(sb, &dir, &uni_name, &num_entries, &dos_name, 1);
+ if (ret)
+ return ret;
+
+ /* check the validation of hint_stat and initialize it if required */
+ if (dir_fid->version != (u32)inode_peek_iversion(inode)) {
+ dir_fid->hint_stat.clu = dir.dir;
+ dir_fid->hint_stat.eidx = 0;
+ dir_fid->version = (u32)inode_peek_iversion(inode);
+ dir_fid->hint_femp.eidx = -1;
+ }
+
+ /* search the file name for directories */
+ dentry = fsi->fs_func->find_dir_entry(sb, dir_fid, &dir, &uni_name,
+ num_entries, &dos_name, TYPE_ALL);
+
+ if ((dentry < 0) && (dentry != -EEXIST))
+ return dentry; /* -error value */
+
+ fid->dir.dir = dir.dir;
+ fid->dir.size = dir.size;
+ fid->dir.flags = dir.flags;
+ fid->entry = dentry;
+
+ /* root directory itself */
+ if (unlikely(dentry == -EEXIST)) {
+ fid->type = TYPE_DIR;
+ fid->rwoffset = 0;
+ fid->hint_bmap.off = CLUS_EOF;
+
+ fid->attr = ATTR_SUBDIR;
+ fid->flags = 0x01;
+ fid->size = 0;
+ fid->start_clu = fsi->root_dir;
+ } else {
+ if (fsi->vol_type == EXFAT) {
+ es = get_dentry_set_in_dir(sb, &dir, dentry, ES_2_ENTRIES, &ep);
+ if (!es)
+ return -EIO;
+ ep2 = ep+1;
+ } else {
+ ep = get_dentry_in_dir(sb, &dir, dentry, NULL);
+ if (!ep)
+ return -EIO;
+ ep2 = ep;
+ }
+
+ fid->type = fsi->fs_func->get_entry_type(ep);
+ fid->rwoffset = 0;
+ fid->hint_bmap.off = CLUS_EOF;
+ fid->attr = fsi->fs_func->get_entry_attr(ep);
+
+ fid->size = fsi->fs_func->get_entry_size(ep2);
+ if ((fid->type == TYPE_FILE) && (fid->size == 0)) {
+ fid->flags = (fsi->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->start_clu = CLUS_EOF;
+ } else {
+ fid->flags = fsi->fs_func->get_entry_flag(ep2);
+ fid->start_clu = fsi->fs_func->get_entry_clu0(ep2);
+ }
+
+ if ((fid->type == TYPE_DIR) && (fsi->vol_type != EXFAT)) {
+ u32 num_clu = 0;
+ CHAIN_T tmp_dir;
+
+ tmp_dir.dir = fid->start_clu;
+ tmp_dir.flags = fid->flags;
+ tmp_dir.size = 0; /* UNUSED */
+
+ if (__count_num_clusters(sb, &tmp_dir, &num_clu))
+ return -EIO;
+ fid->size = (u64)num_clu << fsi->cluster_size_bits;
+ }
+
+ /* FOR GRACEFUL ERROR HANDLING */
+ if (IS_CLUS_FREE(fid->start_clu)) {
+ sdfat_fs_error(sb,
+ "non-zero size file starts with zero cluster "
+ "(size : %llu, p_dir : %u, entry : 0x%08x)",
+ fid->size, fid->dir.dir, fid->entry);
+ sdfat_debug_bug_on(1);
+ return -EIO;
+ }
+
+ if (fsi->vol_type == EXFAT)
+ release_dentry_set(es);
+ }
+
+ /* hint_stat will be used if this is directory. */
+ fid->version = 0;
+ fid->hint_stat.eidx = 0;
+ fid->hint_stat.clu = fid->start_clu;
+ fid->hint_femp.eidx = -1;
+
+ TMSG("%s exited successfully\n", __func__);
+ return 0;
+} /* end of fscore_lookup */
+
+/* create a file */
+s32 fscore_create(struct inode *inode, u8 *path, u8 mode, FILE_ID_T *fid)
+{
+ s32 ret/*, dentry*/;
+ CHAIN_T dir;
+ UNI_NAME_T uni_name;
+ struct super_block *sb = inode->i_sb;
+
+ /* check the validity of directory name in the given pathname */
+ ret = resolve_path(inode, path, &dir, &uni_name);
+ if (ret)
+ return ret;
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* create a new file */
+ ret = create_file(inode, &dir, &uni_name, mode, fid);
+
+ fs_sync(sb, 0);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+
+ return ret;
+}
+
+/* read data from a opened file */
+s32 fscore_read_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count, u64 *rcount)
+{
+ s32 ret = 0;
+ s32 offset, sec_offset;
+ u32 clu_offset;
+ u32 clu;
+ u64 logsector, oneblkread, read_bytes;
+ struct buffer_head *tmp_bh = NULL;
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ /* check if the given file ID is opened */
+ if (fid->type != TYPE_FILE)
+ return -EPERM;
+
+ if (fid->rwoffset > fid->size)
+ fid->rwoffset = fid->size;
+
+ if (count > (fid->size - fid->rwoffset))
+ count = fid->size - fid->rwoffset;
+
+ if (count == 0) {
+ if (rcount)
+ *rcount = 0;
+ return 0;
+ }
+
+ read_bytes = 0;
+
+ while (count > 0) {
+ clu_offset = fid->rwoffset >> fsi->cluster_size_bits;
+ clu = fid->start_clu;
+
+ if (fid->flags == 0x03) {
+ clu += clu_offset;
+ } else {
+ /* hint information */
+ if ((clu_offset > 0) &&
+ ((fid->hint_bmap.off != CLUS_EOF) && (fid->hint_bmap.off > 0)) &&
+ (clu_offset >= fid->hint_bmap.off)) {
+ clu_offset -= fid->hint_bmap.off;
+ clu = fid->hint_bmap.clu;
+ }
+
+ while (clu_offset > 0) {
+ ret = get_next_clus_safe(sb, &clu);
+ if (ret)
+ goto err_out;
+
+ clu_offset--;
+ }
+ }
+
+ /* hint information */
+ fid->hint_bmap.off = fid->rwoffset >> fsi->cluster_size_bits;
+ fid->hint_bmap.clu = clu;
+
+ offset = (s32)(fid->rwoffset & (fsi->cluster_size - 1)); /* byte offset in cluster */
+ sec_offset = offset >> sb->s_blocksize_bits; /* sector offset in cluster */
+ offset &= (sb->s_blocksize - 1); /* byte offset in sector */
+
+ logsector = CLUS_TO_SECT(fsi, clu) + sec_offset;
+
+ oneblkread = (u64)(sb->s_blocksize - offset);
+ if (oneblkread > count)
+ oneblkread = count;
+
+ if ((offset == 0) && (oneblkread == sb->s_blocksize)) {
+ ret = read_sect(sb, logsector, &tmp_bh, 1);
+ if (ret)
+ goto err_out;
+ memcpy(((s8 *) buffer)+read_bytes, ((s8 *) tmp_bh->b_data), (s32) oneblkread);
+ } else {
+ ret = read_sect(sb, logsector, &tmp_bh, 1);
+ if (ret)
+ goto err_out;
+ memcpy(((s8 *) buffer)+read_bytes, ((s8 *) tmp_bh->b_data)+offset, (s32) oneblkread);
+ }
+ count -= oneblkread;
+ read_bytes += oneblkread;
+ fid->rwoffset += oneblkread;
+ }
+
+err_out:
+ brelse(tmp_bh);
+
+ /* set the size of read bytes */
+ if (rcount != NULL)
+ *rcount = read_bytes;
+
+ return ret;
+} /* end of fscore_read_link */
+
+/* write data into an opened file */
+s32 fscore_write_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count, u64 *wcount)
+{
+ s32 ret = 0;
+ s32 modified = false, offset, sec_offset;
+ u32 clu_offset, num_clusters, num_alloc;
+ u32 clu, last_clu;
+ u64 logsector, sector, oneblkwrite, write_bytes;
+ CHAIN_T new_clu;
+ TIMESTAMP_T tm;
+ DENTRY_T *ep, *ep2;
+ ENTRY_SET_CACHE_T *es = NULL;
+ struct buffer_head *tmp_bh = NULL;
+ struct super_block *sb = inode->i_sb;
+ u32 blksize = (u32)sb->s_blocksize;
+ u32 blksize_mask = (u32)(sb->s_blocksize-1);
+ u8 blksize_bits = sb->s_blocksize_bits;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ /* check if the given file ID is opened */
+ if (fid->type != TYPE_FILE)
+ return -EPERM;
+
+ if (fid->rwoffset > fid->size)
+ fid->rwoffset = fid->size;
+
+ if (count == 0) {
+ if (wcount)
+ *wcount = 0;
+ return 0;
+ }
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ if (fid->size == 0)
+ num_clusters = 0;
+ else
+ num_clusters = ((fid->size-1) >> fsi->cluster_size_bits) + 1;
+
+ write_bytes = 0;
+
+ while (count > 0) {
+ clu_offset = (fid->rwoffset >> fsi->cluster_size_bits);
+ clu = last_clu = fid->start_clu;
+
+ if (fid->flags == 0x03) {
+ if ((clu_offset > 0) && (!IS_CLUS_EOF(clu))) {
+ last_clu += clu_offset - 1;
+
+ if (clu_offset == num_clusters)
+ clu = CLUS_EOF;
+ else
+ clu += clu_offset;
+ }
+ } else {
+ /* hint information */
+ if ((clu_offset > 0) &&
+ ((fid->hint_bmap.off != CLUS_EOF) && (fid->hint_bmap.off > 0)) &&
+ (clu_offset >= fid->hint_bmap.off)) {
+ clu_offset -= fid->hint_bmap.off;
+ clu = fid->hint_bmap.clu;
+ }
+
+ while ((clu_offset > 0) && (!IS_CLUS_EOF(clu))) {
+ last_clu = clu;
+ ret = get_next_clus_safe(sb, &clu);
+ if (ret)
+ goto err_out;
+
+ clu_offset--;
+ }
+ }
+
+ if (IS_CLUS_EOF(clu)) {
+ num_alloc = ((count-1) >> fsi->cluster_size_bits) + 1;
+ new_clu.dir = IS_CLUS_EOF(last_clu) ? CLUS_EOF : last_clu+1;
+ new_clu.size = 0;
+ new_clu.flags = fid->flags;
+
+ /* (1) allocate a chain of clusters */
+ ret = fsi->fs_func->alloc_cluster(sb, num_alloc, &new_clu, ALLOC_COLD);
+ if (ret)
+ goto err_out;
+
+ /* (2) append to the FAT chain */
+ if (IS_CLUS_EOF(last_clu)) {
+ if (new_clu.flags == 0x01)
+ fid->flags = 0x01;
+ fid->start_clu = new_clu.dir;
+ modified = true;
+ } else {
+ if (new_clu.flags != fid->flags) {
+ /* no-fat-chain bit is disabled,
+ * so fat-chain should be synced with
+ * alloc-bmp
+ */
+ chain_cont_cluster(sb, fid->start_clu, num_clusters);
+ fid->flags = 0x01;
+ modified = true;
+ }
+ if (new_clu.flags == 0x01) {
+ ret = fat_ent_set(sb, last_clu, new_clu.dir);
+ if (ret)
+ goto err_out;
+ }
+ }
+
+ num_clusters += num_alloc;
+ clu = new_clu.dir;
+ }
+
+ /* hint information */
+ fid->hint_bmap.off = fid->rwoffset >> fsi->cluster_size_bits;
+ fid->hint_bmap.clu = clu;
+
+ /* byte offset in cluster */
+ offset = (s32)(fid->rwoffset & (fsi->cluster_size-1));
+ /* sector offset in cluster */
+ sec_offset = offset >> blksize_bits;
+ /* byte offset in sector */
+ offset &= blksize_mask;
+ logsector = CLUS_TO_SECT(fsi, clu) + sec_offset;
+
+ oneblkwrite = (u64)(blksize - offset);
+ if (oneblkwrite > count)
+ oneblkwrite = count;
+
+ if ((offset == 0) && (oneblkwrite == blksize)) {
+ ret = read_sect(sb, logsector, &tmp_bh, 0);
+ if (ret)
+ goto err_out;
+
+ memcpy(((s8 *)tmp_bh->b_data),
+ ((s8 *)buffer)+write_bytes,
+ (s32)oneblkwrite);
+
+ ret = write_sect(sb, logsector, tmp_bh, 0);
+ if (ret) {
+ brelse(tmp_bh);
+ goto err_out;
+ }
+ } else {
+ if ((offset > 0) || ((fid->rwoffset+oneblkwrite) < fid->size)) {
+ ret = read_sect(sb, logsector, &tmp_bh, 1);
+ if (ret)
+ goto err_out;
+ } else {
+ ret = read_sect(sb, logsector, &tmp_bh, 0);
+ if (ret)
+ goto err_out;
+ }
+
+ memcpy(((s8 *) tmp_bh->b_data)+offset, ((s8 *) buffer)+write_bytes, (s32) oneblkwrite);
+ ret = write_sect(sb, logsector, tmp_bh, 0);
+ if (ret) {
+ brelse(tmp_bh);
+ goto err_out;
+ }
+ }
+
+ count -= oneblkwrite;
+ write_bytes += oneblkwrite;
+ fid->rwoffset += oneblkwrite;
+
+ fid->attr |= ATTR_ARCHIVE;
+
+ if (fid->size < fid->rwoffset) {
+ fid->size = fid->rwoffset;
+ modified = true;
+ }
+ }
+
+ brelse(tmp_bh);
+
+ /* (3) update the directory entry */
+ /* get_entry_(set_)in_dir should check the DIR_DELETED flag. */
+ if (fsi->vol_type == EXFAT) {
+ es = get_dentry_set_in_dir(sb, &(fid->dir), fid->entry, ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -EIO;
+ goto err_out;
+ }
+ ep2 = ep+1;
+ } else {
+ ep = get_dentry_in_dir(sb, &(fid->dir), fid->entry, §or);
+ if (!ep) {
+ ret = -EIO;
+ goto err_out;
+ }
+ ep2 = ep;
+ }
+
+ fsi->fs_func->set_entry_time(ep, tm_now(inode, &tm), TM_MODIFY);
+ fsi->fs_func->set_entry_attr(ep, fid->attr);
+
+ if (modified) {
+ if (fsi->fs_func->get_entry_flag(ep2) != fid->flags)
+ fsi->fs_func->set_entry_flag(ep2, fid->flags);
+
+ if (fsi->fs_func->get_entry_size(ep2) != fid->size)
+ fsi->fs_func->set_entry_size(ep2, fid->size);
+
+ if (fsi->fs_func->get_entry_clu0(ep2) != fid->start_clu)
+ fsi->fs_func->set_entry_clu0(ep2, fid->start_clu);
+ }
+
+ if (fsi->vol_type == EXFAT) {
+ if (update_dir_chksum_with_entry_set(sb, es)) {
+ ret = -EIO;
+ goto err_out;
+ }
+ release_dentry_set(es);
+ } else {
+ if (dcache_modify(sb, sector)) {
+ ret = -EIO;
+ goto err_out;
+ }
+ }
+
+ fs_sync(sb, 0);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+
+err_out:
+ /* set the size of written bytes */
+ if (wcount)
+ *wcount = write_bytes;
+
+ return ret;
+} /* end of fscore_write_link */
+
+/* resize the file length */
+s32 fscore_truncate(struct inode *inode, u64 old_size, u64 new_size)
+{
+ u32 num_clusters_new, num_clusters_da, num_clusters_phys;
+ u32 last_clu = CLUS_FREE;
+ u64 sector;
+ CHAIN_T clu;
+ TIMESTAMP_T tm;
+ DENTRY_T *ep, *ep2;
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
+ ENTRY_SET_CACHE_T *es = NULL;
+ s32 evict = (fid->dir.dir == DIR_DELETED) ? 1 : 0;
+
+ /* check if the given file ID is opened */
+ if ((fid->type != TYPE_FILE) && (fid->type != TYPE_DIR))
+ return -EPERM;
+
+ /* TO CHECK inode type and size */
+ MMSG("%s: inode(%p) type(%s) size:%lld->%lld\n", __func__, inode,
+ (fid->type == TYPE_FILE) ? "file" : "dir", old_size, new_size);
+
+ /* XXX : This is for debugging. */
+
+ /* It can be when write failed */
+#if 0
+ if (fid->size != old_size) {
+ DMSG("%s: inode(%p) size-mismatch(old:%lld != fid:%lld)\n",
+ __func__, inode, old_size, fid->size);
+ WARN_ON(1);
+ }
+#endif
+ /*
+ * There is no lock to protect fid->size.
+ * So, we should get old_size and use it.
+ */
+ if (old_size <= new_size)
+ return 0;
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* Reserved count update */
+ #define num_clusters(v) ((v) ? (u32)(((v) - 1) >> fsi->cluster_size_bits) + 1 : 0)
+ num_clusters_da = num_clusters(SDFAT_I(inode)->i_size_aligned);
+ num_clusters_new = num_clusters(i_size_read(inode));
+ num_clusters_phys = num_clusters(SDFAT_I(inode)->i_size_ondisk);
+
+ /* num_clusters(i_size_old) should be equal to num_clusters_da */
+ BUG_ON((num_clusters(old_size)) != (num_clusters(SDFAT_I(inode)->i_size_aligned)));
+
+ /* for debugging (FIXME: is okay on no-da case?) */
+ BUG_ON(num_clusters_da < num_clusters_phys);
+
+ if ((num_clusters_da != num_clusters_phys) &&
+ (num_clusters_new < num_clusters_da)) {
+ /* Decrement reserved clusters
+ * n_reserved = num_clusters_da - max(new,phys)
+ */
+ int n_reserved = (num_clusters_new > num_clusters_phys) ?
+ (num_clusters_da - num_clusters_new) :
+ (num_clusters_da - num_clusters_phys);
+
+ fsi->reserved_clusters -= n_reserved;
+ BUG_ON(fsi->reserved_clusters < 0);
+ }
+
+ clu.dir = fid->start_clu;
+ /* In no-da case, num_clusters_phys is equal to below value
+ * clu.size = (u32)((old_size-1) >> fsi->cluster_size_bits) + 1;
+ */
+ clu.size = num_clusters_phys;
+ clu.flags = fid->flags;
+
+ /* For bigdata */
+ sdfat_statistics_set_trunc(clu.flags, &clu);
+
+ if (new_size > 0) {
+ /* Truncate FAT chain num_clusters after the first cluster
+ * num_clusters = min(new, phys);
+ */
+ u32 num_clusters = (num_clusters_new < num_clusters_phys) ?
+ num_clusters_new : num_clusters_phys;
+
+ /* Follow FAT chain
+ * (defensive coding - works fine even with a corrupted FAT table)
+ */
+ if (clu.flags == 0x03) {
+ clu.dir += num_clusters;
+ clu.size -= num_clusters;
+#if 0
+ /* extent_get_clus can`t know last_cluster
+ * when find target cluster in cache.
+ */
+ } else if (fid->type == TYPE_FILE) {
+ u32 fclus = 0;
+ s32 err = extent_get_clus(inode, num_clusters,
+ &fclus, &(clu.dir), &last_clu, 0);
+ if (err)
+ return -EIO;
+ ASSERT(fclus == num_clusters);
+
+ if ((num_clusters > 1) && (last_clu == fid->start_clu)) {
+ u32 fclus_tmp = 0;
+ u32 temp = 0;
+
+ err = extent_get_clus(inode, num_clusters - 1,
+ &fclus_tmp, &last_clu, &temp, 0);
+ if (err)
+ return -EIO;
+ ASSERT(fclus_tmp == (num_clusters - 1));
+ }
+
+ num_clusters -= fclus;
+ clu.size -= fclus;
+#endif
+ } else {
+ while (num_clusters > 0) {
+ last_clu = clu.dir;
+ if (get_next_clus_safe(sb, &(clu.dir)))
+ return -EIO;
+
+ num_clusters--;
+ clu.size--;
+ }
+ }
+
+ /* Optimization available: */
+#if 0
+ if (num_clusters_new < num_clusters) {
+ < loop >
+ } else {
+ // num_clusters_new >= num_clusters_phys
+ // FAT truncation is not necessary
+
+ clu.dir = CLUS_EOF;
+ clu.size = 0;
+ }
+#endif
+ } else if (new_size == 0) {
+ fid->flags = (fsi->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->start_clu = CLUS_EOF;
+ }
+ fid->size = new_size;
+
+ if (fid->type == TYPE_FILE)
+ fid->attr |= ATTR_ARCHIVE;
+
+ /*
+ * clu.dir: free from
+ * clu.size: # of clusters to free (exFAT, 0x03 only), no fat_free if 0
+ * clu.flags: fid->flags (exFAT only)
+ */
+
+ /* (1) update the directory entry */
+ if (!evict) {
+
+ if (fsi->vol_type == EXFAT) {
+ es = get_dentry_set_in_dir(sb, &(fid->dir), fid->entry, ES_ALL_ENTRIES, &ep);
+ if (!es)
+ return -EIO;
+ ep2 = ep+1;
+ } else {
+ ep = get_dentry_in_dir(sb, &(fid->dir), fid->entry, §or);
+ if (!ep)
+ return -EIO;
+ ep2 = ep;
+ }
+
+ fsi->fs_func->set_entry_time(ep, tm_now(inode, &tm), TM_MODIFY);
+ fsi->fs_func->set_entry_attr(ep, fid->attr);
+
+ /*
+ * if (fsi->vol_type != EXFAT)
+ * dcache_modify(sb, sector);
+ */
+
+ /* File size should be zero if there is no cluster allocated */
+ if (IS_CLUS_EOF(fid->start_clu))
+ fsi->fs_func->set_entry_size(ep2, 0);
+ else
+ fsi->fs_func->set_entry_size(ep2, new_size);
+
+ if (new_size == 0) {
+ /* Any directory can not be truncated to zero */
+ BUG_ON(fid->type != TYPE_FILE);
+
+ fsi->fs_func->set_entry_flag(ep2, 0x01);
+ fsi->fs_func->set_entry_clu0(ep2, CLUS_FREE);
+ }
+
+ if (fsi->vol_type == EXFAT) {
+ if (update_dir_chksum_with_entry_set(sb, es))
+ return -EIO;
+ release_dentry_set(es);
+ } else {
+ if (dcache_modify(sb, sector))
+ return -EIO;
+ }
+
+ } /* end of if(fid->dir.dir != DIR_DELETED) */
+
+ /* (2) cut off from the FAT chain */
+ if ((fid->flags == 0x01) &&
+ (!IS_CLUS_FREE(last_clu)) && (!IS_CLUS_EOF(last_clu))) {
+ if (fat_ent_set(sb, last_clu, CLUS_EOF))
+ return -EIO;
+ }
+
+ /* (3) invalidate cache and free the clusters */
+ /* clear extent cache */
+ extent_cache_inval_inode(inode);
+
+ /* hint information */
+ fid->hint_bmap.off = CLUS_EOF;
+ fid->hint_bmap.clu = CLUS_EOF;
+ if (fid->rwoffset > fid->size)
+ fid->rwoffset = fid->size;
+
+ /* hint_stat will be used if this is directory. */
+ fid->hint_stat.eidx = 0;
+ fid->hint_stat.clu = fid->start_clu;
+ fid->hint_femp.eidx = -1;
+
+ /* free the clusters */
+ if (fsi->fs_func->free_cluster(sb, &clu, evict))
+ return -EIO;
+
+ fs_sync(sb, 0);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+
+ return 0;
+} /* end of fscore_truncate */
+
+static void update_parent_info(FILE_ID_T *fid, struct inode *parent_inode)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(parent_inode->i_sb)->fsi);
+ FILE_ID_T *parent_fid = &(SDFAT_I(parent_inode)->fid);
+
+ /*
+ * the problem that FILE_ID_T caches wrong parent info.
+ *
+ * because of flag-mismatch of fid->dir,
+ * there is abnormal traversing cluster chain.
+ */
+ if (unlikely((parent_fid->flags != fid->dir.flags)
+ || (parent_fid->size != (fid->dir.size<<fsi->cluster_size_bits))
+ || (parent_fid->start_clu != fid->dir.dir))) {
+
+ fid->dir.dir = parent_fid->start_clu;
+ fid->dir.flags = parent_fid->flags;
+ fid->dir.size = ((parent_fid->size + (fsi->cluster_size-1))
+ >> fsi->cluster_size_bits);
+ }
+}
+
+/* rename or move a old file into a new file */
+s32 fscore_rename(struct inode *old_parent_inode, FILE_ID_T *fid,
+ struct inode *new_parent_inode, struct dentry *new_dentry)
+{
+ s32 ret;
+ s32 dentry;
+ CHAIN_T olddir, newdir;
+ CHAIN_T *p_dir = NULL;
+ UNI_NAME_T uni_name;
+ DENTRY_T *ep;
+ struct super_block *sb = old_parent_inode->i_sb;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ u8 *new_path = (u8 *) new_dentry->d_name.name;
+ struct inode *new_inode = new_dentry->d_inode;
+ int num_entries;
+ FILE_ID_T *new_fid = NULL;
+ u32 new_entry_type = TYPE_UNUSED;
+ s32 new_entry = 0;
+
+ /* check the validity of pointer parameters */
+ if ((new_path == NULL) || (strlen(new_path) == 0))
+ return -EINVAL;
+
+ if (fid->dir.dir == DIR_DELETED) {
+ EMSG("%s : abnormal access to deleted source dentry\n", __func__);
+ return -ENOENT;
+ }
+
+ /* patch 1.2.4 : the problem that FILE_ID_T caches wrong parent info. */
+ update_parent_info(fid, old_parent_inode);
+
+ olddir.dir = fid->dir.dir;
+ olddir.size = fid->dir.size;
+ olddir.flags = fid->dir.flags;
+
+ dentry = fid->entry;
+
+ /* check if the old file is "." or ".." */
+ if (fsi->vol_type != EXFAT) {
+ if ((olddir.dir != fsi->root_dir) && (dentry < 2))
+ return -EPERM;
+ }
+
+ ep = get_dentry_in_dir(sb, &olddir, dentry, NULL);
+ if (!ep)
+ return -EIO;
+
+#ifdef CONFIG_SDFAT_CHECK_RO_ATTR
+ if (fsi->fs_func->get_entry_attr(ep) & ATTR_READONLY)
+ return -EPERM;
+#endif
+
+ /* check whether new dir is existing directory and empty */
+ if (new_inode) {
+ ret = -EIO;
+ new_fid = &SDFAT_I(new_inode)->fid;
+
+ if (new_fid->dir.dir == DIR_DELETED) {
+ EMSG("%s : abnormal access to deleted target dentry\n", __func__);
+ goto out;
+ }
+
+ /* patch 1.2.4 :
+ * the problem that FILE_ID_T caches wrong parent info.
+ *
+ * FIXME : is needed?
+ */
+ update_parent_info(new_fid, new_parent_inode);
+
+ p_dir = &(new_fid->dir);
+ new_entry = new_fid->entry;
+ ep = get_dentry_in_dir(sb, p_dir, new_entry, NULL);
+ if (!ep)
+ goto out;
+
+ new_entry_type = fsi->fs_func->get_entry_type(ep);
+
+ /* if new_inode exists, update fid */
+ new_fid->size = i_size_read(new_inode);
+
+ if (new_entry_type == TYPE_DIR) {
+ CHAIN_T new_clu;
+
+ new_clu.dir = new_fid->start_clu;
+ new_clu.size = ((new_fid->size-1) >> fsi->cluster_size_bits) + 1;
+ new_clu.flags = new_fid->flags;
+
+ ret = check_dir_empty(sb, &new_clu);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* check the validity of directory name in the given new pathname */
+ ret = resolve_path(new_parent_inode, new_path, &newdir, &uni_name);
+ if (ret)
+ return ret;
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ if (olddir.dir == newdir.dir)
+ ret = rename_file(new_parent_inode, &olddir, dentry, &uni_name, fid);
+ else
+ ret = move_file(new_parent_inode, &olddir, dentry, &newdir, &uni_name, fid);
+
+ if ((!ret) && new_inode) {
+ /* delete entries of new_dir */
+ ep = get_dentry_in_dir(sb, p_dir, new_entry, NULL);
+ if (!ep) {
+ ret = -EIO;
+ goto del_out;
+ }
+
+ num_entries = fsi->fs_func->count_ext_entries(sb, p_dir, new_entry, ep);
+ if (num_entries < 0) {
+ ret = -EIO;
+ goto del_out;
+ }
+
+
+ if (fsi->fs_func->delete_dir_entry(sb, p_dir, new_entry, 0, num_entries+1)) {
+ ret = -EIO;
+ goto del_out;
+ }
+
+ /* Free the clusters if new_inode is a dir(as if fscore_rmdir) */
+ if (new_entry_type == TYPE_DIR) {
+ /* new_fid, new_clu_to_free */
+ CHAIN_T new_clu_to_free;
+
+ new_clu_to_free.dir = new_fid->start_clu;
+ new_clu_to_free.size = ((new_fid->size-1) >> fsi->cluster_size_bits) + 1;
+ new_clu_to_free.flags = new_fid->flags;
+
+ if (fsi->fs_func->free_cluster(sb, &new_clu_to_free, 1)) {
+ /* just set I/O error only */
+ ret = -EIO;
+ }
+
+ new_fid->size = 0;
+ new_fid->start_clu = CLUS_EOF;
+ new_fid->flags = (fsi->vol_type == EXFAT) ? 0x03 : 0x01;
+ }
+del_out:
+ /* Update new_inode fid
+ * Prevent syncing removed new_inode
+ * (new_fid is already initialized above code ("if (new_inode)")
+ */
+ new_fid->dir.dir = DIR_DELETED;
+ }
+out:
+ fs_sync(sb, 0);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+
+ return ret;
+} /* end of fscore_rename */
+
+/* remove a file */
+s32 fscore_remove(struct inode *inode, FILE_ID_T *fid)
+{
+ s32 ret;
+ s32 dentry;
+ CHAIN_T dir, clu_to_free;
+ DENTRY_T *ep;
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ dir.dir = fid->dir.dir;
+ dir.size = fid->dir.size;
+ dir.flags = fid->dir.flags;
+
+ dentry = fid->entry;
+
+ if (fid->dir.dir == DIR_DELETED) {
+ EMSG("%s : abnormal access to deleted dentry\n", __func__);
+ return -ENOENT;
+ }
+
+ ep = get_dentry_in_dir(sb, &dir, dentry, NULL);
+ if (!ep)
+ return -EIO;
+
+
+#ifdef CONFIG_SDFAT_CHECK_RO_ATTR
+ if (fsi->fs_func->get_entry_attr(ep) & ATTR_READONLY)
+ return -EPERM;
+#endif
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* (1) update the directory entry */
+ ret = remove_file(inode, &dir, dentry);
+ if (ret)
+ goto out;
+
+ clu_to_free.dir = fid->start_clu;
+ clu_to_free.size = ((fid->size-1) >> fsi->cluster_size_bits) + 1;
+ clu_to_free.flags = fid->flags;
+
+ /* (2) invalidate extent cache and free the clusters
+ */
+ /* clear extent cache */
+ extent_cache_inval_inode(inode);
+ ret = fsi->fs_func->free_cluster(sb, &clu_to_free, 0);
+ /* WARN : DO NOT RETURN ERROR IN HERE */
+
+ /* (3) update FILE_ID_T */
+ fid->size = 0;
+ fid->start_clu = CLUS_EOF;
+ fid->flags = (fsi->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->dir.dir = DIR_DELETED;
+
+ fs_sync(sb, 0);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+out:
+ return ret;
+} /* end of fscore_remove */
+
+
+/*
+ * Get the information of a given file
+ * REMARK : This function does not need any file name on linux
+ *
+ * info.Size means the value saved on disk.
+ * But the root directory doesn't have a real dentry,
+ * so the size of root directory returns calculated one exceptively.
+ */
+s32 fscore_read_inode(struct inode *inode, DIR_ENTRY_T *info)
+{
+ u64 sector;
+ s32 count;
+ CHAIN_T dir;
+ TIMESTAMP_T tm;
+ DENTRY_T *ep, *ep2;
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
+ ENTRY_SET_CACHE_T *es = NULL;
+ u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
+
+ TMSG("%s entered\n", __func__);
+
+ extent_cache_init_inode(inode);
+
+ /* if root directory */
+ if (is_dir && (fid->dir.dir == fsi->root_dir) && (fid->entry == -1)) {
+ info->Attr = ATTR_SUBDIR;
+ memset((s8 *) &info->CreateTimestamp, 0, sizeof(DATE_TIME_T));
+ memset((s8 *) &info->ModifyTimestamp, 0, sizeof(DATE_TIME_T));
+ memset((s8 *) &info->AccessTimestamp, 0, sizeof(DATE_TIME_T));
+ //strcpy(info->NameBuf.sfn, ".");
+ //strcpy(info->NameBuf.lfn, ".");
+
+ dir.dir = fsi->root_dir;
+ dir.flags = 0x01;
+ dir.size = 0; /* UNUSED */
+
+ /* FAT16 root_dir */
+ if (IS_CLUS_FREE(fsi->root_dir)) {
+ info->Size = fsi->dentries_in_root << DENTRY_SIZE_BITS;
+ } else {
+ u32 num_clu;
+
+ if (__count_num_clusters(sb, &dir, &num_clu))
+ return -EIO;
+ info->Size = (u64)num_clu << fsi->cluster_size_bits;
+ }
+
+ count = __count_dos_name_entries(sb, &dir, TYPE_DIR, NULL);
+ if (count < 0)
+ return -EIO;
+ info->NumSubdirs = count;
+
+ return 0;
+ }
+
+ /* get the directory entry of given file or directory */
+ if (fsi->vol_type == EXFAT) {
+ /* es should be released */
+ es = get_dentry_set_in_dir(sb, &(fid->dir), fid->entry, ES_2_ENTRIES, &ep);
+ if (!es)
+ return -EIO;
+ ep2 = ep+1;
+ } else {
+ ep = get_dentry_in_dir(sb, &(fid->dir), fid->entry, §or);
+ if (!ep)
+ return -EIO;
+ ep2 = ep;
+ /* dcache should be unlocked */
+ dcache_lock(sb, sector);
+ }
+
+ /* set FILE_INFO structure using the acquired DENTRY_T */
+ info->Attr = fsi->fs_func->get_entry_attr(ep);
+
+ fsi->fs_func->get_entry_time(ep, &tm, TM_CREATE);
+ info->CreateTimestamp.Year = tm.year;
+ info->CreateTimestamp.Month = tm.mon;
+ info->CreateTimestamp.Day = tm.day;
+ info->CreateTimestamp.Hour = tm.hour;
+ info->CreateTimestamp.Minute = tm.min;
+ info->CreateTimestamp.Second = tm.sec;
+ info->CreateTimestamp.MilliSecond = 0;
+ info->CreateTimestamp.Timezone.value = tm.tz.value;
+
+ fsi->fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+ info->ModifyTimestamp.Year = tm.year;
+ info->ModifyTimestamp.Month = tm.mon;
+ info->ModifyTimestamp.Day = tm.day;
+ info->ModifyTimestamp.Hour = tm.hour;
+ info->ModifyTimestamp.Minute = tm.min;
+ info->ModifyTimestamp.Second = tm.sec;
+ info->ModifyTimestamp.MilliSecond = 0;
+ info->ModifyTimestamp.Timezone.value = tm.tz.value;
+
+ memset((s8 *) &info->AccessTimestamp, 0, sizeof(DATE_TIME_T));
+
+ info->NumSubdirs = 0;
+ info->Size = fsi->fs_func->get_entry_size(ep2);
+
+ if (fsi->vol_type == EXFAT)
+ release_dentry_set(es);
+ else
+ dcache_unlock(sb, sector);
+
+ if (is_dir) {
+ u32 dotcnt = 0;
+
+ dir.dir = fid->start_clu;
+ dir.flags = fid->flags;
+ dir.size = fid->size >> fsi->cluster_size_bits;
+ /*
+ * NOTE :
+ * If "dir.flags" has 0x01, "dir.size" is meaningless.
+ */
+#if 0
+ if (info->Size == 0) {
+ s32 num_clu;
+
+ if (__count_num_clusters(sb, &dir, &num_clu))
+ return -EIO;
+ info->Size = (u64)num_clu << fsi->cluster_size_bits;
+ }
+#endif
+ count = __count_dos_name_entries(sb, &dir, TYPE_DIR, &dotcnt);
+ if (count < 0)
+ return -EIO;
+
+ if (fsi->vol_type == EXFAT) {
+ count += SDFAT_MIN_SUBDIR;
+ } else {
+ /*
+ * if directory has been corrupted,
+ * we have to adjust subdir count.
+ */
+ BUG_ON(dotcnt > SDFAT_MIN_SUBDIR);
+ if (dotcnt < SDFAT_MIN_SUBDIR) {
+ EMSG("%s: contents of the directory has been "
+ "corrupted (parent clus : %08x, idx : %d)",
+ __func__, fid->dir.dir, fid->entry);
+ }
+ count += (SDFAT_MIN_SUBDIR - dotcnt);
+ }
+ info->NumSubdirs = count;
+ }
+
+ TMSG("%s exited successfully\n", __func__);
+ return 0;
+} /* end of fscore_read_inode */
+
+/* set the information of a given file
+ * REMARK : This function does not need any file name on linux
+ */
+s32 fscore_write_inode(struct inode *inode, DIR_ENTRY_T *info, s32 sync)
+{
+ s32 ret = -EIO;
+ u64 sector;
+ TIMESTAMP_T tm;
+ DENTRY_T *ep, *ep2;
+ ENTRY_SET_CACHE_T *es = NULL;
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
+ u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
+
+
+ /* SKIP WRITING INODE :
+ * if the inode is already unlinked,
+ * there is no need for updating inode
+ */
+ if (fid->dir.dir == DIR_DELETED)
+ return 0;
+
+ if (is_dir && (fid->dir.dir == fsi->root_dir) && (fid->entry == -1))
+ return 0;
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* get the directory entry of given file or directory */
+ if (fsi->vol_type == EXFAT) {
+ es = get_dentry_set_in_dir(sb, &(fid->dir), fid->entry, ES_ALL_ENTRIES, &ep);
+ if (!es)
+ return -EIO;
+ ep2 = ep+1;
+ } else {
+ /* for other than exfat */
+ ep = get_dentry_in_dir(sb, &(fid->dir), fid->entry, §or);
+ if (!ep)
+ return -EIO;
+ ep2 = ep;
+ }
+
+
+ fsi->fs_func->set_entry_attr(ep, info->Attr);
+
+ /* set FILE_INFO structure using the acquired DENTRY_T */
+ tm.tz = info->CreateTimestamp.Timezone;
+ tm.sec = info->CreateTimestamp.Second;
+ tm.min = info->CreateTimestamp.Minute;
+ tm.hour = info->CreateTimestamp.Hour;
+ tm.day = info->CreateTimestamp.Day;
+ tm.mon = info->CreateTimestamp.Month;
+ tm.year = info->CreateTimestamp.Year;
+ fsi->fs_func->set_entry_time(ep, &tm, TM_CREATE);
+
+ tm.tz = info->ModifyTimestamp.Timezone;
+ tm.sec = info->ModifyTimestamp.Second;
+ tm.min = info->ModifyTimestamp.Minute;
+ tm.hour = info->ModifyTimestamp.Hour;
+ tm.day = info->ModifyTimestamp.Day;
+ tm.mon = info->ModifyTimestamp.Month;
+ tm.year = info->ModifyTimestamp.Year;
+ fsi->fs_func->set_entry_time(ep, &tm, TM_MODIFY);
+
+ if (is_dir && fsi->vol_type != EXFAT) {
+ /* overwrite dirsize if FAT32 and dir size != 0 */
+ if (fsi->fs_func->get_entry_size(ep2))
+ fsi->fs_func->set_entry_size(ep2, 0);
+ } else {
+ /* File size should be zero if there is no cluster allocated */
+ u64 on_disk_size = info->Size;
+
+ if (IS_CLUS_EOF(fid->start_clu))
+ on_disk_size = 0;
+
+ fsi->fs_func->set_entry_size(ep2, on_disk_size);
+ }
+
+ if (fsi->vol_type == EXFAT) {
+ ret = update_dir_chksum_with_entry_set(sb, es);
+ release_dentry_set(es);
+ } else {
+ ret = dcache_modify(sb, sector);
+ }
+
+ fs_sync(sb, sync);
+ /* Comment below code to prevent super block update frequently */
+ //fs_set_vol_flags(sb, VOL_CLEAN);
+
+ return ret;
+} /* end of fscore_write_inode */
+
+
+/*
+ * Input: inode, (logical) clu_offset, target allocation area
+ * Output: errcode, cluster number
+ * *clu = (~0), if it's unable to allocate a new cluster
+ */
+s32 fscore_map_clus(struct inode *inode, u32 clu_offset, u32 *clu, int dest)
+{
+ s32 ret, modified = false;
+ u32 last_clu;
+ u64 sector;
+ CHAIN_T new_clu;
+ DENTRY_T *ep;
+ ENTRY_SET_CACHE_T *es = NULL;
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
+ u32 local_clu_offset = clu_offset;
+ s32 reserved_clusters = fsi->reserved_clusters;
+ u32 num_to_be_allocated = 0, num_clusters = 0;
+
+ fid->rwoffset = (s64)(clu_offset) << fsi->cluster_size_bits;
+
+ if (SDFAT_I(inode)->i_size_ondisk > 0)
+ num_clusters = (u32)((SDFAT_I(inode)->i_size_ondisk-1) >> fsi->cluster_size_bits) + 1;
+
+ if (clu_offset >= num_clusters)
+ num_to_be_allocated = clu_offset - num_clusters + 1;
+
+ if ((dest == ALLOC_NOWHERE) && (num_to_be_allocated > 0)) {
+ *clu = CLUS_EOF;
+ return 0;
+ }
+
+ /* check always request cluster is 1 */
+ //ASSERT(num_to_be_allocated == 1);
+
+ sdfat_debug_check_clusters(inode);
+
+ *clu = last_clu = fid->start_clu;
+
+ /* XXX: Defensive code needed.
+ * what if i_size_ondisk != # of allocated clusters
+ */
+ if (fid->flags == 0x03) {
+ if ((clu_offset > 0) && (!IS_CLUS_EOF(*clu))) {
+ last_clu += clu_offset - 1;
+
+ if (clu_offset == num_clusters)
+ *clu = CLUS_EOF;
+ else
+ *clu += clu_offset;
+ }
+ } else if (fid->type == TYPE_FILE) {
+ u32 fclus = 0;
+ s32 err = extent_get_clus(inode, clu_offset,
+ &fclus, clu, &last_clu, 1);
+ if (err)
+ return -EIO;
+
+ clu_offset -= fclus;
+ } else {
+ /* hint information */
+ if ((clu_offset > 0) &&
+ ((fid->hint_bmap.off != CLUS_EOF) && (fid->hint_bmap.off > 0)) &&
+ (clu_offset >= fid->hint_bmap.off)) {
+ clu_offset -= fid->hint_bmap.off;
+ /* hint_bmap.clu should be valid */
+ ASSERT(fid->hint_bmap.clu >= 2);
+ *clu = fid->hint_bmap.clu;
+ }
+
+ while ((clu_offset > 0) && (!IS_CLUS_EOF(*clu))) {
+ last_clu = *clu;
+ if (get_next_clus_safe(sb, clu))
+ return -EIO;
+ clu_offset--;
+ }
+ }
+
+ if (IS_CLUS_EOF(*clu)) {
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ new_clu.dir = (IS_CLUS_EOF(last_clu)) ? CLUS_EOF : last_clu + 1;
+ new_clu.size = 0;
+ new_clu.flags = fid->flags;
+
+ /* (1) allocate a cluster */
+ if (num_to_be_allocated < 1) {
+ /* Broken FAT (i_size > allocated FAT) */
+ EMSG("%s: invalid fat chain : inode(%p) "
+ "num_to_be_allocated(%d) "
+ "i_size_ondisk(%lld) fid->flags(%02x) "
+ "fid->start(%08x) fid->hint_off(%u) "
+ "fid->hint_clu(%u) fid->rwoffset(%llu) "
+ "modified_clu_off(%d) last_clu(%08x) "
+ "new_clu(%08x)", __func__, inode,
+ num_to_be_allocated,
+ (SDFAT_I(inode)->i_size_ondisk),
+ fid->flags, fid->start_clu,
+ fid->hint_bmap.off, fid->hint_bmap.clu,
+ fid->rwoffset, clu_offset,
+ last_clu, new_clu.dir);
+ sdfat_fs_error(sb, "broken FAT chain.");
+ return -EIO;
+ }
+
+ ret = fsi->fs_func->alloc_cluster(sb, num_to_be_allocated, &new_clu, ALLOC_COLD);
+ if (ret)
+ return ret;
+
+ if (IS_CLUS_EOF(new_clu.dir) || IS_CLUS_FREE(new_clu.dir)) {
+ sdfat_fs_error(sb, "bogus cluster new allocated"
+ "(last_clu : %u, new_clu : %u)",
+ last_clu, new_clu.dir);
+ ASSERT(0);
+ return -EIO;
+ }
+
+ /* Reserved cluster dec. */
+ // XXX: Inode DA flag needed
+ if (SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_DELAY) {
+ BUG_ON(reserved_clusters < num_to_be_allocated);
+ reserved_clusters -= num_to_be_allocated;
+
+ }
+
+ /* (2) append to the FAT chain */
+ if (IS_CLUS_EOF(last_clu)) {
+ if (new_clu.flags == 0x01)
+ fid->flags = 0x01;
+ fid->start_clu = new_clu.dir;
+ modified = true;
+ } else {
+ if (new_clu.flags != fid->flags) {
+ /* no-fat-chain bit is disabled,
+ * so fat-chain should be synced with alloc-bmp
+ */
+ chain_cont_cluster(sb, fid->start_clu, num_clusters);
+ fid->flags = 0x01;
+ modified = true;
+ }
+ if (new_clu.flags == 0x01)
+ if (fat_ent_set(sb, last_clu, new_clu.dir))
+ return -EIO;
+ }
+
+ num_clusters += num_to_be_allocated;
+ *clu = new_clu.dir;
+
+ if (fid->dir.dir != DIR_DELETED) {
+
+ if (fsi->vol_type == EXFAT) {
+ es = get_dentry_set_in_dir(sb, &(fid->dir), fid->entry, ES_ALL_ENTRIES, &ep);
+ if (!es)
+ return -EIO;
+ /* get stream entry */
+ ep++;
+ }
+
+ /* (3) update directory entry */
+ if (modified) {
+ if (fsi->vol_type != EXFAT) {
+ ep = get_dentry_in_dir(sb, &(fid->dir), fid->entry, §or);
+ if (!ep)
+ return -EIO;
+ }
+
+ if (fsi->fs_func->get_entry_flag(ep) != fid->flags)
+ fsi->fs_func->set_entry_flag(ep, fid->flags);
+
+ if (fsi->fs_func->get_entry_clu0(ep) != fid->start_clu)
+ fsi->fs_func->set_entry_clu0(ep, fid->start_clu);
+
+ fsi->fs_func->set_entry_size(ep, fid->size);
+
+ if (fsi->vol_type != EXFAT) {
+ if (dcache_modify(sb, sector))
+ return -EIO;
+ }
+ }
+
+ if (fsi->vol_type == EXFAT) {
+ if (update_dir_chksum_with_entry_set(sb, es))
+ return -EIO;
+ release_dentry_set(es);
+ }
+
+ } /* end of if != DIR_DELETED */
+
+
+ /* add number of new blocks to inode (non-DA only) */
+ if (!(SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_DELAY)) {
+ inode->i_blocks += num_to_be_allocated << (fsi->cluster_size_bits - sb->s_blocksize_bits);
+ } else {
+ // DA의 경우, i_blocks가 이미 증가해있어야 함.
+ BUG_ON(clu_offset >= (inode->i_blocks >> (fsi->cluster_size_bits - sb->s_blocksize_bits)));
+ }
+#if 0
+ fs_sync(sb, 0);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+ /* (4) Move *clu pointer along FAT chains (hole care)
+ * because the caller of this function expect *clu to be the last cluster.
+ * This only works when num_to_be_allocated >= 2,
+ * *clu = (the first cluster of the allocated chain) => (the last cluster of ...)
+ */
+ if (fid->flags == 0x03) {
+ *clu += num_to_be_allocated - 1;
+ } else {
+ while (num_to_be_allocated > 1) {
+ if (get_next_clus_safe(sb, clu))
+ return -EIO;
+ num_to_be_allocated--;
+ }
+ }
+
+ }
+
+ /* update reserved_clusters */
+ fsi->reserved_clusters = reserved_clusters;
+
+ /* hint information */
+ fid->hint_bmap.off = local_clu_offset;
+ fid->hint_bmap.clu = *clu;
+
+ return 0;
+} /* end of fscore_map_clus */
+
+/* allocate reserved cluster */
+s32 fscore_reserve_clus(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	u32 blocks_per_clu = 1 << (fsi->cluster_size_bits - sb->s_blocksize_bits);
+
+	/* Volume full? (clusters 0 and 1 are never allocatable) */
+	if ((fsi->used_clusters + fsi->reserved_clusters) >= (fsi->num_clusters - 2))
+		return -ENOSPC;
+
+	/* Refuse to reserve space on an invalid backing device */
+	if (bdev_check_bdi_valid(sb))
+		return -EIO;
+
+	fsi->reserved_clusters++;
+
+	/* Account the not-yet-materialized cluster in i_blocks up front */
+	inode->i_blocks += blocks_per_clu;
+
+	sdfat_debug_check_clusters(inode);
+
+	return 0;
+}
+
+/* remove an entry, BUT don't truncate */
+s32 fscore_unlink(struct inode *inode, FILE_ID_T *fid)
+{
+	struct super_block *sb = inode->i_sb;
+	CHAIN_T parent;
+	DENTRY_T *ep;
+	s32 entry_idx = fid->entry;
+
+	/* Reject a fid whose entry was already removed */
+	if (fid->dir.dir == DIR_DELETED) {
+		EMSG("%s : abnormal access to deleted dentry\n", __func__);
+		return -ENOENT;
+	}
+
+	/* Work on a private copy of the parent-directory chain */
+	parent.dir = fid->dir.dir;
+	parent.size = fid->dir.size;
+	parent.flags = fid->dir.flags;
+
+	ep = get_dentry_in_dir(sb, &parent, entry_idx, NULL);
+	if (!ep)
+		return -EIO;
+
+#ifdef CONFIG_SDFAT_CHECK_RO_ATTR
+	if (SDFAT_SB(sb)->fsi.fs_func->get_entry_attr(ep) & ATTR_READONLY)
+		return -EPERM;
+#endif
+
+	fs_set_vol_flags(sb, VOL_DIRTY);
+
+	/* (1) update the directory entry */
+	if (remove_file(inode, &parent, entry_idx))
+		return -EIO;
+
+	/* Poison fid->dir so later core calls reject this stale fid */
+	fid->dir.dir = DIR_DELETED;
+
+	fs_sync(sb, 0);
+	fs_set_vol_flags(sb, VOL_CLEAN);
+
+	return 0;
+}
+
+/*----------------------------------------------------------------------*/
+/* Directory Operation Functions */
+/*----------------------------------------------------------------------*/
+
+/* create a directory */
+s32 fscore_mkdir(struct inode *inode, u8 *path, FILE_ID_T *fid)
+{
+	struct super_block *sb = inode->i_sb;
+	CHAIN_T parent;
+	UNI_NAME_T uniname;
+	s32 err;
+
+	TMSG("%s entered\n", __func__);
+
+	/* Validate the new directory name and resolve its parent chain */
+	err = resolve_path(inode, path, &parent, &uniname);
+	if (err)
+		goto out;
+
+	fs_set_vol_flags(sb, VOL_DIRTY);
+
+	err = create_dir(inode, &parent, &uniname, fid);
+
+	fs_sync(sb, 0);
+	fs_set_vol_flags(sb, VOL_CLEAN);
+out:
+	TMSG("%s exited with err(%d)\n", __func__, err);
+	return err;
+}
+
+/* read a directory entry from the opened directory */
+s32 fscore_readdir(struct inode *inode, DIR_ENTRY_T *dir_entry)
+{
+	s32 i;
+	s32 dentries_per_clu, dentries_per_clu_bits = 0;
+	u32 type, clu_offset;
+	u64 sector;
+	CHAIN_T dir, clu;
+	UNI_NAME_T uni_name;
+	TIMESTAMP_T tm;
+	DENTRY_T *ep;
+	struct super_block *sb = inode->i_sb;
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
+	u32 dentry = (u32)(fid->rwoffset & 0xFFFFFFFF); /* u32 is enough for directory */
+
+	/* check if the given file ID is opened */
+	if (fid->type != TYPE_DIR)
+		return -EPERM;
+
+	/* entry == -1 denotes the root directory itself */
+	if (fid->entry == -1) {
+		dir.dir = fsi->root_dir;
+		dir.size = 0; /* just initialize, but will not use */
+		dir.flags = 0x01;
+	} else {
+		dir.dir = fid->start_clu;
+		dir.size = fid->size >> fsi->cluster_size_bits;
+		dir.flags = fid->flags;
+		sdfat_debug_bug_on(dentry >= (dir.size * fsi->dentries_per_clu));
+	}
+
+	if (IS_CLUS_FREE(dir.dir)) { /* FAT16 root_dir */
+		/* FAT16 root is a fixed-size area, not a cluster chain */
+		dentries_per_clu = fsi->dentries_in_root;
+
+		/* Prevent readdir over directory size */
+		if (dentry >= dentries_per_clu) {
+			clu.dir = CLUS_EOF;
+		} else {
+			clu.dir = dir.dir;
+			clu.size = dir.size;
+			clu.flags = dir.flags;
+		}
+	} else {
+		dentries_per_clu = fsi->dentries_per_clu;
+		dentries_per_clu_bits = ilog2(dentries_per_clu);
+
+		/* cluster index (within the dir) that holds the next entry */
+		clu_offset = dentry >> dentries_per_clu_bits;
+		clu.dir = dir.dir;
+		clu.size = dir.size;
+		clu.flags = dir.flags;
+
+		if (clu.flags == 0x03) {
+			/* contiguous (no-fat-chain) dir: plain arithmetic */
+			clu.dir += clu_offset;
+			clu.size -= clu_offset;
+		} else {
+			/* hint_information: resume the FAT walk from the
+			 * last cluster visited, when the hint is usable */
+			if ((clu_offset > 0) &&
+				((fid->hint_bmap.off != CLUS_EOF) && (fid->hint_bmap.off > 0)) &&
+				(clu_offset >= fid->hint_bmap.off)) {
+				clu_offset -= fid->hint_bmap.off;
+				clu.dir = fid->hint_bmap.clu;
+			}
+
+			/* walk the FAT chain for the remaining offset */
+			while (clu_offset > 0) {
+				if (get_next_clus_safe(sb, &(clu.dir)))
+					return -EIO;
+
+				clu_offset--;
+			}
+		}
+	}
+
+	while (!IS_CLUS_EOF(clu.dir)) {
+		/* index of the next entry inside the current cluster */
+		if (IS_CLUS_FREE(dir.dir)) /* FAT16 root_dir */
+			i = dentry % dentries_per_clu;
+		else
+			i = dentry & (dentries_per_clu-1);
+
+		for ( ; i < dentries_per_clu; i++, dentry++) {
+			ep = get_dentry_in_dir(sb, &clu, i, &sector);
+			if (!ep)
+				return -EIO;
+
+			type = fsi->fs_func->get_entry_type(ep);
+
+			/* an UNUSED entry marks the end of the directory */
+			if (type == TYPE_UNUSED)
+				break;
+
+			/* emit only regular files and subdirectories */
+			if ((type != TYPE_FILE) && (type != TYPE_DIR))
+				continue;
+
+			/* pin the sector while copying fields out of ep */
+			dcache_lock(sb, sector);
+			dir_entry->Attr = fsi->fs_func->get_entry_attr(ep);
+
+			fsi->fs_func->get_entry_time(ep, &tm, TM_CREATE);
+			dir_entry->CreateTimestamp.Year = tm.year;
+			dir_entry->CreateTimestamp.Month = tm.mon;
+			dir_entry->CreateTimestamp.Day = tm.day;
+			dir_entry->CreateTimestamp.Hour = tm.hour;
+			dir_entry->CreateTimestamp.Minute = tm.min;
+			dir_entry->CreateTimestamp.Second = tm.sec;
+			dir_entry->CreateTimestamp.MilliSecond = 0;
+
+			fsi->fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+			dir_entry->ModifyTimestamp.Year = tm.year;
+			dir_entry->ModifyTimestamp.Month = tm.mon;
+			dir_entry->ModifyTimestamp.Day = tm.day;
+			dir_entry->ModifyTimestamp.Hour = tm.hour;
+			dir_entry->ModifyTimestamp.Minute = tm.min;
+			dir_entry->ModifyTimestamp.Second = tm.sec;
+			dir_entry->ModifyTimestamp.MilliSecond = 0;
+
+			/* access time is not reported */
+			memset((s8 *) &dir_entry->AccessTimestamp, 0, sizeof(DATE_TIME_T));
+
+			/* long name from extension entries; fall back to the
+			 * DOS 8.3 name when no LFN is present */
+			*(uni_name.name) = 0x0;
+			fsi->fs_func->get_uniname_from_ext_entry(sb, &dir, dentry, uni_name.name);
+			if (*(uni_name.name) == 0x0)
+				get_uniname_from_dos_entry(sb, (DOS_DENTRY_T *) ep, &uni_name, 0x1);
+			nls_uni16s_to_vfsname(sb, &uni_name,
+				dir_entry->NameBuf.lfn,
+				dir_entry->NameBuf.lfnbuf_len);
+			dcache_unlock(sb, sector);
+
+			if (fsi->vol_type == EXFAT) {
+				/* exFAT: size lives in the stream entry (i+1) */
+				ep = get_dentry_in_dir(sb, &clu, i+1, NULL);
+				if (!ep)
+					return -EIO;
+			} else {
+				/* FAT: also report the 8.3 short name */
+				get_uniname_from_dos_entry(sb, (DOS_DENTRY_T *) ep, &uni_name, 0x0);
+				nls_uni16s_to_vfsname(sb, &uni_name,
+					dir_entry->NameBuf.sfn,
+					dir_entry->NameBuf.sfnbuf_len);
+			}
+
+			dir_entry->Size = fsi->fs_func->get_entry_size(ep);
+
+			/*
+			 * Update hint information :
+			 * fat16 root directory does not need it.
+			 */
+			if (!IS_CLUS_FREE(dir.dir)) {
+				fid->hint_bmap.off = dentry >> dentries_per_clu_bits;
+				fid->hint_bmap.clu = clu.dir;
+			}
+
+			/* resume after this entry on the next call */
+			fid->rwoffset = (s64) ++dentry;
+
+			return 0;
+		}
+
+		/* fat16 root directory */
+		if (IS_CLUS_FREE(dir.dir))
+			break;
+
+		/* advance to the next cluster of the directory */
+		if (clu.flags == 0x03) {
+			if ((--clu.size) > 0)
+				clu.dir++;
+			else
+				clu.dir = CLUS_EOF;
+		} else {
+			if (get_next_clus_safe(sb, &(clu.dir)))
+				return -EIO;
+		}
+	}
+
+	/* end of directory: return an empty name */
+	dir_entry->NameBuf.lfn[0] = '\0';
+
+	fid->rwoffset = (s64)dentry;
+
+	return 0;
+} /* end of fscore_readdir */
+
+/* remove a directory */
+s32 fscore_rmdir(struct inode *inode, FILE_ID_T *fid)
+{
+	struct super_block *sb = inode->i_sb;
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	CHAIN_T parent, clu_to_free;
+	DENTRY_T *ep;
+	s32 entry_idx = fid->entry;
+	s32 err;
+
+	/* Reject a fid whose entry was already removed */
+	if (fid->dir.dir == DIR_DELETED) {
+		EMSG("%s : abnormal access to deleted dentry\n", __func__);
+		return -ENOENT;
+	}
+
+	/* Work on a private copy of the parent-directory chain */
+	parent.dir = fid->dir.dir;
+	parent.size = fid->dir.size;
+	parent.flags = fid->dir.flags;
+
+	/* FAT (non-exFAT) dirs keep "." and ".." at slots 0/1 */
+	if (fsi->vol_type != EXFAT) {
+		if ((parent.dir != fsi->root_dir) && (entry_idx < 2))
+			return -EPERM;
+	}
+
+	ep = get_dentry_in_dir(sb, &parent, entry_idx, NULL);
+	if (!ep)
+		return -EIO;
+
+#ifdef CONFIG_SDFAT_CHECK_RO_ATTR
+	if (SDFAT_SB(sb)->fsi.fs_func->get_entry_attr(ep) & ATTR_READONLY)
+		return -EPERM;
+#endif
+
+	/* The chain that would be orphaned; it must hold no live entries */
+	clu_to_free.dir = fid->start_clu;
+	clu_to_free.size = ((fid->size-1) >> fsi->cluster_size_bits) + 1;
+	clu_to_free.flags = fid->flags;
+
+	err = check_dir_empty(sb, &clu_to_free);
+	if (err) {
+		if (err == -EIO)
+			EMSG("%s : failed to check_dir_empty : err(%d)\n",
+				__func__, err);
+		return err;
+	}
+
+	fs_set_vol_flags(sb, VOL_DIRTY);
+
+	/* (1) update the directory entry */
+	err = remove_file(inode, &parent, entry_idx);
+	if (err) {
+		EMSG("%s : failed to remove_file : err(%d)\n", __func__, err);
+		return err;
+	}
+
+	/* Poison fid->dir so later core calls reject this stale fid */
+	fid->dir.dir = DIR_DELETED;
+
+	fs_sync(sb, 0);
+	fs_set_vol_flags(sb, VOL_CLEAN);
+
+	return err;
+} /* end of fscore_rmdir */
+
+/* end of core.c */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDFAT_CORE_H
+#define _SDFAT_CORE_H
+
+#include <asm/byteorder.h>
+
+#include "config.h"
+#include "api.h"
+#include "upcase.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/*----------------------------------------------------------------------*/
+/* Constant & Macro Definitions */
+/*----------------------------------------------------------------------*/
+#define get_next_clus(sb, pclu) fat_ent_get(sb, *(pclu), pclu)
+#define get_next_clus_safe(sb, pclu) fat_ent_get_safe(sb, *(pclu), pclu)
+
+/* file status */
+/* this prevents
+ * fscore_write_inode, fscore_map_clus, ... with the unlinked inodes
+ * from corrupting on-disk dentry data.
+ *
+ * The fid->dir value of unlinked inode will be DIR_DELETED
+ * and those functions must check if fid->dir is valid prior to
+ * the calling of get_dentry_in_dir()
+ */
+#define DIR_DELETED 0xFFFF0321
+
+/*----------------------------------------------------------------------*/
+/* Type Definitions */
+/*----------------------------------------------------------------------*/
+#define ES_2_ENTRIES 2
+#define ES_3_ENTRIES 3
+#define ES_ALL_ENTRIES 0
+
+/* In-memory copy of one exFAT dentry set (file + stream + name entries). */
+typedef struct {
+	u64 sector;	// sector number that contains file_entry
+	u32 offset;	// byte offset of file_entry inside that sector
+	s32 alloc_flag;	// flag in stream entry. 01 for cluster chain, 03 for contig. clusters.
+	u32 num_entries;	// number of dentries cached in this set
+	void *__buf;	// raw dentry bytes start here; __buf should be the last member
+} ENTRY_SET_CACHE_T;
+
+/*----------------------------------------------------------------------*/
+/* Inline Functions */
+/*----------------------------------------------------------------------*/
+/* A data cluster is valid iff it lies in [CLUS_BASE, num_clusters). */
+static inline bool is_valid_clus(FS_INFO_T *fsi, u32 clus)
+{
+	return (clus >= CLUS_BASE) && (clus < fsi->num_clusters);
+}
+
+/*----------------------------------------------------------------------*/
+/* External Function Declarations */
+/*----------------------------------------------------------------------*/
+
+/* file system initialization & shutdown functions */
+s32 fscore_init(void);
+s32 fscore_shutdown(void);
+
+/* bdev management */
+s32 fscore_check_bdi_valid(struct super_block *sb);
+
+/* chain management */
+s32 chain_cont_cluster(struct super_block *sb, u32 chain, u32 len);
+
+/* volume management functions */
+s32 fscore_mount(struct super_block *sb);
+s32 fscore_umount(struct super_block *sb);
+s32 fscore_statfs(struct super_block *sb, VOL_INFO_T *info);
+s32 fscore_sync_fs(struct super_block *sb, s32 do_sync);
+s32 fscore_set_vol_flags(struct super_block *sb, u16 new_flag, s32 always_sync);
+u32 fscore_get_au_stat(struct super_block *sb, s32 mode);
+
+/* file management functions */
+s32 fscore_lookup(struct inode *inode, u8 *path, FILE_ID_T *fid);
+s32 fscore_create(struct inode *inode, u8 *path, u8 mode, FILE_ID_T *fid);
+s32 fscore_read_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count, u64 *rcount);
+s32 fscore_write_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count, u64 *wcount);
+s32 fscore_truncate(struct inode *inode, u64 old_size, u64 new_size);
+s32 fscore_rename(struct inode *old_parent_inode, FILE_ID_T *fid,
+ struct inode *new_parent_inode, struct dentry *new_dentry);
+s32 fscore_remove(struct inode *inode, FILE_ID_T *fid);
+s32 fscore_read_inode(struct inode *inode, DIR_ENTRY_T *info);
+s32 fscore_write_inode(struct inode *inode, DIR_ENTRY_T *info, int sync);
+s32 fscore_map_clus(struct inode *inode, u32 clu_offset, u32 *clu, int dest);
+s32 fscore_reserve_clus(struct inode *inode);
+s32 fscore_unlink(struct inode *inode, FILE_ID_T *fid);
+
+/* directory management functions */
+s32 fscore_mkdir(struct inode *inode, u8 *path, FILE_ID_T *fid);
+s32 fscore_readdir(struct inode *inode, DIR_ENTRY_T *dir_ent);
+s32 fscore_rmdir(struct inode *inode, FILE_ID_T *fid);
+
+
+/*----------------------------------------------------------------------*/
+/* External Function Declarations (NOT TO UPPER LAYER) */
+/*----------------------------------------------------------------------*/
+
+/* core.c : core code for common */
+/* dir entry management functions */
+DENTRY_T *get_dentry_in_dir(struct super_block *sb, CHAIN_T *p_dir, s32 entry, u64 *sector);
+
+/* name conversion functions */
+void get_uniname_from_dos_entry(struct super_block *sb, DOS_DENTRY_T *ep, UNI_NAME_T *p_uniname, u8 mode);
+
+/* file operation functions */
+s32 walk_fat_chain(struct super_block *sb, CHAIN_T *p_dir, u32 byte_offset, u32 *clu);
+
+/* sdfat/cache.c */
+s32 meta_cache_init(struct super_block *sb);
+s32 meta_cache_shutdown(struct super_block *sb);
+u8 *fcache_getblk(struct super_block *sb, u64 sec);
+s32 fcache_modify(struct super_block *sb, u64 sec);
+s32 fcache_release_all(struct super_block *sb);
+s32 fcache_flush(struct super_block *sb, u32 sync);
+
+u8 *dcache_getblk(struct super_block *sb, u64 sec);
+s32 dcache_modify(struct super_block *sb, u64 sec);
+s32 dcache_lock(struct super_block *sb, u64 sec);
+s32 dcache_unlock(struct super_block *sb, u64 sec);
+s32 dcache_release(struct super_block *sb, u64 sec);
+s32 dcache_release_all(struct super_block *sb);
+s32 dcache_flush(struct super_block *sb, u32 sync);
+s32 dcache_readahead(struct super_block *sb, u64 sec);
+
+
+/* fatent.c */
+s32 fat_ent_ops_init(struct super_block *sb);
+s32 fat_ent_get(struct super_block *sb, u32 loc, u32 *content);
+s32 fat_ent_set(struct super_block *sb, u32 loc, u32 content);
+s32 fat_ent_get_safe(struct super_block *sb, u32 loc, u32 *content);
+
+/* core_fat.c : core code for fat */
+s32 fat_generate_dos_name_new(struct super_block *sb, CHAIN_T *p_dir, DOS_NAME_T *p_dosname, s32 n_entries);
+s32 mount_fat16(struct super_block *sb, pbr_t *p_pbr);
+s32 mount_fat32(struct super_block *sb, pbr_t *p_pbr);
+
+/* core_exfat.c : core code for exfat */
+
+s32 load_alloc_bmp(struct super_block *sb);
+void free_alloc_bmp(struct super_block *sb);
+ENTRY_SET_CACHE_T *get_dentry_set_in_dir(struct super_block *sb,
+ CHAIN_T *p_dir, s32 entry, u32 type, DENTRY_T **file_ep);
+void release_dentry_set(ENTRY_SET_CACHE_T *es);
+s32 update_dir_chksum(struct super_block *sb, CHAIN_T *p_dir, s32 entry);
+s32 update_dir_chksum_with_entry_set(struct super_block *sb, ENTRY_SET_CACHE_T *es);
+bool is_dir_empty(struct super_block *sb, CHAIN_T *p_dir);
+s32 mount_exfat(struct super_block *sb, pbr_t *p_pbr);
+
+/* amap_smart.c : creation on mount / destroy on umount */
+int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hidden_sect);
+void amap_destroy(struct super_block *sb);
+
+/* amap_smart.c : (de)allocation functions */
+s32 amap_fat_alloc_cluster(struct super_block *sb, u32 num_alloc, CHAIN_T *p_chain, s32 dest);
+s32 amap_free_cluster(struct super_block *sb, CHAIN_T *p_chain, s32 do_relse);/* Not impelmented */
+s32 amap_release_cluster(struct super_block *sb, u32 clu); /* Only update AMAP */
+
+/* amap_smart.c : misc (for defrag) */
+s32 amap_mark_ignore(struct super_block *sb, u32 clu);
+s32 amap_unmark_ignore(struct super_block *sb, u32 clu);
+s32 amap_unmark_ignore_all(struct super_block *sb);
+s32 amap_check_working(struct super_block *sb, u32 clu);
+s32 amap_get_freeclus(struct super_block *sb, u32 clu);
+
+/* amap_smart.c : stat AU */
+u32 amap_get_au_stat(struct super_block *sb, s32 mode);
+
+
+/* blkdev.c */
+s32 bdev_open_dev(struct super_block *sb);
+s32 bdev_close_dev(struct super_block *sb);
+s32 bdev_check_bdi_valid(struct super_block *sb);
+s32 bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs);
+s32 bdev_mread(struct super_block *sb, u64 secno, struct buffer_head **bh, u64 num_secs, s32 read);
+s32 bdev_mwrite(struct super_block *sb, u64 secno, struct buffer_head *bh, u64 num_secs, s32 sync);
+s32 bdev_sync_all(struct super_block *sb);
+
+/* blkdev.c : sector read/write functions */
+s32 read_sect(struct super_block *sb, u64 sec, struct buffer_head **bh, s32 read);
+s32 write_sect(struct super_block *sb, u64 sec, struct buffer_head *bh, s32 sync);
+s32 read_msect(struct super_block *sb, u64 sec, struct buffer_head **bh, s64 num_secs, s32 read);
+s32 write_msect(struct super_block *sb, u64 sec, struct buffer_head *bh, s64 num_secs, s32 sync);
+s32 write_msect_zero(struct super_block *sb, u64 sec, u64 num_secs);
+
+/* misc.c */
+u8 calc_chksum_1byte(void *data, s32 len, u8 chksum);
+u16 calc_chksum_2byte(void *data, s32 len, u16 chksum, s32 type);
+
+/* extent.c */
+s32 extent_cache_init(void);
+void extent_cache_shutdown(void);
+void extent_cache_init_inode(struct inode *inode);
+void extent_cache_inval_inode(struct inode *inode);
+s32 extent_get_clus(struct inode *inode, u32 cluster, u32 *fclus,
+ u32 *dclus, u32 *last_dclus, s32 allow_eof);
+/*----------------------------------------------------------------------*/
+/* Wrapper Function */
+/*----------------------------------------------------------------------*/
+void set_sb_dirty(struct super_block *sb);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _SDFAT_CORE_H */
+
+/* end of core.h */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : core_exfat.c */
+/* PURPOSE : exFAT-fs core code for sdFAT */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+
+#include "sdfat.h"
+#include "core.h"
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+/*----------------------------------------------------------------------*/
+/* Constant & Macro Definitions */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* Global Variable Definitions */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* Local Variable Definitions */
+/*----------------------------------------------------------------------*/
+/*
+ * free_bit[v] = index of the least-significant ZERO bit of byte v
+ * (e.g. v = 0b0111 -> 3). Defined for v = 0..254 only; 255 has no
+ * zero bit and must not be looked up.
+ */
+static u8 free_bit[] = {
+	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2,/*  0 ~  19*/
+	0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3,/* 20 ~  39*/
+	0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/* 40 ~  59*/
+	0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,/* 60 ~  79*/
+	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2,/* 80 ~  99*/
+	0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3,/*100 ~ 119*/
+	0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/*120 ~ 139*/
+	0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,/*140 ~ 159*/
+	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2,/*160 ~ 179*/
+	0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3,/*180 ~ 199*/
+	0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/*200 ~ 219*/
+	0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,/*220 ~ 239*/
+	0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0          /*240 ~ 254*/
+};
+
+/* used_bit[v] = number of set bits in byte v (popcount lookup table). */
+static u8 used_bit[] = {
+	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3,/*  0 ~  19*/
+	2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4,/* 20 ~  39*/
+	2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5,/* 40 ~  59*/
+	4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,/* 60 ~  79*/
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4,/* 80 ~  99*/
+	3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6,/*100 ~ 119*/
+	4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4,/*120 ~ 139*/
+	3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,/*140 ~ 159*/
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5,/*160 ~ 179*/
+	4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5,/*180 ~ 199*/
+	3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6,/*200 ~ 219*/
+	5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,/*220 ~ 239*/
+	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8       /*240 ~ 255*/
+};
+
+
+/*======================================================================*/
+/* Local Function Definitions */
+/*======================================================================*/
+/*
+ * Directory Entry Management Functions
+ */
+/* Classify an on-disk exFAT dentry by its type byte. */
+static u32 exfat_get_entry_type(DENTRY_T *p_entry)
+{
+	FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry;
+	u8 t = ep->type;
+
+	if (t == EXFAT_UNUSED)
+		return TYPE_UNUSED;
+	if (t < 0x80)	/* in-use bit clear: deleted entry */
+		return TYPE_DELETED;
+
+	/* well-known exFAT type codes */
+	switch (t) {
+	case 0x80:
+		return TYPE_INVALID;
+	case 0x81:
+		return TYPE_BITMAP;
+	case 0x82:
+		return TYPE_UPCASE;
+	case 0x83:
+		return TYPE_VOLUME;
+	case 0x85:
+		return (le16_to_cpu(ep->attr) & ATTR_SUBDIR) ?
+			TYPE_DIR : TYPE_FILE;
+	case 0xA0:
+		return TYPE_GUID;
+	case 0xA1:
+		return TYPE_PADDING;
+	case 0xA2:
+		return TYPE_ACLTAB;
+	case 0xC0:
+		return TYPE_STREAM;
+	case 0xC1:
+		return TYPE_EXTEND;
+	case 0xC2:
+		return TYPE_ACL;
+	}
+
+	/* anything else falls into one of the four exFAT type classes */
+	if (t < 0xA0)
+		return TYPE_CRITICAL_PRI;
+	if (t < 0xC0)
+		return TYPE_BENIGN_PRI;
+	if (t < 0xE0)
+		return TYPE_CRITICAL_SEC;
+	return TYPE_BENIGN_SEC;
+} /* end of exfat_get_entry_type */
+
+/* Stamp an exFAT dentry with the on-disk type byte for `type`. */
+static void exfat_set_entry_type(DENTRY_T *p_entry, u32 type)
+{
+	FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry;
+
+	switch (type) {
+	case TYPE_UNUSED:
+		ep->type = 0x0;
+		break;
+	case TYPE_DELETED:
+		ep->type &= ~0x80;	/* just drop the in-use bit */
+		break;
+	case TYPE_STREAM:
+		ep->type = 0xC0;
+		break;
+	case TYPE_EXTEND:
+		ep->type = 0xC1;
+		break;
+	case TYPE_BITMAP:
+		ep->type = 0x81;
+		break;
+	case TYPE_UPCASE:
+		ep->type = 0x82;
+		break;
+	case TYPE_VOLUME:
+		ep->type = 0x83;
+		break;
+	case TYPE_DIR:
+		ep->type = 0x85;
+		ep->attr = cpu_to_le16(ATTR_SUBDIR);
+		break;
+	case TYPE_FILE:
+		ep->type = 0x85;
+		ep->attr = cpu_to_le16(ATTR_ARCHIVE);
+		break;
+	case TYPE_SYMLINK:
+		ep->type = 0x85;
+		ep->attr = cpu_to_le16(ATTR_ARCHIVE | ATTR_SYMLINK);
+		break;
+	}
+} /* end of exfat_set_entry_type */
+
+/* Read the attribute word from a file dentry (little-endian on disk). */
+static u32 exfat_get_entry_attr(DENTRY_T *p_entry)
+{
+	return (u32)le16_to_cpu(((FILE_DENTRY_T *)p_entry)->attr);
+}
+
+/* Store the attribute word into a file dentry (truncated to 16 bits). */
+static void exfat_set_entry_attr(DENTRY_T *p_entry, u32 attr)
+{
+	((FILE_DENTRY_T *)p_entry)->attr = cpu_to_le16((u16) attr);
+}
+
+/* Read the allocation flags byte from a stream dentry. */
+static u8 exfat_get_entry_flag(DENTRY_T *p_entry)
+{
+	return ((STRM_DENTRY_T *)p_entry)->flags;
+}
+
+/* Store the allocation flags byte into a stream dentry. */
+static void exfat_set_entry_flag(DENTRY_T *p_entry, u8 flags)
+{
+	((STRM_DENTRY_T *)p_entry)->flags = flags;
+}
+
+/* Read the first-cluster number from a stream dentry. */
+static u32 exfat_get_entry_clu0(DENTRY_T *p_entry)
+{
+	return (u32)le32_to_cpu(((STRM_DENTRY_T *)p_entry)->start_clu);
+}
+
+/* Store the first-cluster number into a stream dentry. */
+static void exfat_set_entry_clu0(DENTRY_T *p_entry, u32 start_clu)
+{
+	((STRM_DENTRY_T *)p_entry)->start_clu = cpu_to_le32(start_clu);
+}
+
+/*
+ * Read the size from a stream dentry. Note this reports valid_size
+ * (valid data length), not the allocated-size field.
+ */
+static u64 exfat_get_entry_size(DENTRY_T *p_entry)
+{
+	return le64_to_cpu(((STRM_DENTRY_T *)p_entry)->valid_size);
+}
+
+/* Store the size into a stream dentry. */
+static void exfat_set_entry_size(DENTRY_T *p_entry, u64 size)
+{
+	STRM_DENTRY_T *strm = (STRM_DENTRY_T *)p_entry;
+
+	/* keep valid data length and allocated size in lock-step */
+	strm->valid_size = cpu_to_le64(size);
+	strm->size = cpu_to_le64(size);
+}
+
+
+/*
+ * DOS time has 2-second resolution; exFAT stores the odd second of a
+ * timestamp separately in 10ms units (0 or 100).
+ */
+#define TENS_MS_PER_SEC	(100)
+#define SEC_TO_TENS_MS(sec)	(((sec) & 0x01) ? TENS_MS_PER_SEC : 0)
+#define TENS_MS_TO_SEC(tens_ms)	(((tens_ms) / TENS_MS_PER_SEC) ? 1 : 0)
+
+/*
+ * Unpack one of the three timestamps (create/modify/access) of a file
+ * dentry into *tp. time word: hhhhh mmmmmm sssss (2-sec units);
+ * date word: yyyyyyy mmmm ddddd.
+ */
+static void exfat_get_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode)
+{
+	/* defaults decode to 00:00:00, day 1 / month 1 (d = 0x21) */
+	u16 t = 0x00, d = 0x21, tz = 0x00, s = 0x00;
+	FILE_DENTRY_T *ep = (FILE_DENTRY_T *)p_entry;
+
+	switch (mode) {
+	case TM_CREATE:
+		t = le16_to_cpu(ep->create_time);
+		d = le16_to_cpu(ep->create_date);
+		/* odd second is kept out-of-band in 10ms units */
+		s = TENS_MS_TO_SEC(ep->create_time_ms);
+		tz = ep->create_tz;
+		break;
+	case TM_MODIFY:
+		t = le16_to_cpu(ep->modify_time);
+		d = le16_to_cpu(ep->modify_date);
+		s = TENS_MS_TO_SEC(ep->modify_time_ms);
+		tz = ep->modify_tz;
+		break;
+	case TM_ACCESS:
+		/* access time has no sub-2-second component on disk */
+		t = le16_to_cpu(ep->access_time);
+		d = le16_to_cpu(ep->access_date);
+		tz = ep->access_tz;
+		break;
+	}
+
+	tp->tz.value = tz;
+	tp->sec = ((t & 0x001F) << 1) + s;	/* 2-sec units + odd second */
+	tp->min = (t >> 5) & 0x003F;
+	tp->hour = (t >> 11);
+	tp->day = (d & 0x001F);
+	tp->mon = (d >> 5) & 0x000F;
+	tp->year = (d >> 9);
+} /* end of exfat_get_entry_time */
+
+/*
+ * Pack *tp into one of the three timestamps (create/modify/access) of
+ * a file dentry. time word: hhhhh mmmmmm sssss (2-sec units); date
+ * word: yyyyyyy mmmm ddddd. The odd second is stored out-of-band in
+ * the *_time_ms field (10ms units).
+ */
+static void exfat_set_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode)
+{
+	u16 t, d;
+	FILE_DENTRY_T *ep = (FILE_DENTRY_T *)p_entry;
+
+	t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
+	d = (tp->year << 9) | (tp->mon << 5) | tp->day;
+
+	switch (mode) {
+	case TM_CREATE:
+		ep->create_time = cpu_to_le16(t);
+		ep->create_time_ms = SEC_TO_TENS_MS(tp->sec);
+		ep->create_date = cpu_to_le16(d);
+		ep->create_tz = tp->tz.value;
+		break;
+	case TM_MODIFY:
+		ep->modify_time = cpu_to_le16(t);
+		ep->modify_date = cpu_to_le16(d);
+		/* use the same helper as TM_CREATE for the odd second */
+		ep->modify_time_ms = SEC_TO_TENS_MS(tp->sec);
+		ep->modify_tz = tp->tz.value;
+		break;
+	case TM_ACCESS:
+		/* access time has no sub-2-second component on disk */
+		ep->access_time = cpu_to_le16(t);
+		ep->access_date = cpu_to_le16(d);
+		ep->access_tz = tp->tz.value;
+		break;
+	}
+} /* end of exfat_set_entry_time */
+
+
+/* Initialize a fresh file dentry: set its type, stamp all times "now". */
+static void __init_file_entry(struct super_block *sb, FILE_DENTRY_T *ep, u32 type)
+{
+	TIMESTAMP_T now;
+	TIMESTAMP_T *tp = tm_now_sb(sb, &now);
+
+	exfat_set_entry_type((DENTRY_T *) ep, type);
+	exfat_set_entry_time((DENTRY_T *) ep, tp, TM_CREATE);
+	exfat_set_entry_time((DENTRY_T *) ep, tp, TM_MODIFY);
+	exfat_set_entry_time((DENTRY_T *) ep, tp, TM_ACCESS);
+} /* end of __init_file_entry */
+
+/* Initialize a fresh stream dentry for a chain starting at start_clu. */
+static void __init_strm_entry(STRM_DENTRY_T *ep, u8 flags, u32 start_clu, u64 size)
+{
+	exfat_set_entry_type((DENTRY_T *) ep, TYPE_STREAM);
+	ep->flags = flags;
+	ep->start_clu = cpu_to_le32(start_clu);
+	/* a fresh stream starts with valid_size equal to size */
+	ep->size = cpu_to_le64(size);
+	ep->valid_size = ep->size;
+} /* end of __init_strm_entry */
+
+/*
+ * Initialize a name dentry with up to 15 UTF-16 units from uniname;
+ * copying stops once the terminating NUL has been written.
+ */
+static void __init_name_entry(NAME_DENTRY_T *ep, u16 *uniname)
+{
+	s32 idx = 0;
+
+	exfat_set_entry_type((DENTRY_T *) ep, TYPE_EXTEND);
+	ep->flags = 0x0;
+
+	while (idx < 15) {
+		u16 c = *uniname;
+
+		ep->unicode_0_14[idx] = cpu_to_le16(c);
+		if (c == 0x0)
+			break;
+		uniname++;
+		idx++;
+	}
+} /* end of __init_name_entry */
+
+/*
+ * Initialize the file + stream dentry pair for a new file/dir at
+ * `entry`. Returns 0 or -EIO.
+ */
+static s32 exfat_init_dir_entry(struct super_block *sb, CHAIN_T *p_dir, s32 entry, u32 type, u32 start_clu, u64 size)
+{
+	u64 sector;
+	u8 flags;
+	FILE_DENTRY_T *file_ep;
+	STRM_DENTRY_T *strm_ep;
+
+	/* directories are created as contiguous (no-fat-chain) */
+	flags = (type == TYPE_FILE) ? 0x01 : 0x03;
+
+	/* we cannot use get_dentry_set_in_dir here because file ep is not initialized yet */
+	file_ep = (FILE_DENTRY_T *)get_dentry_in_dir(sb, p_dir, entry, &sector);
+	if (!file_ep)
+		return -EIO;
+
+	/*
+	 * Initialize and dirty each dentry while `sector` still refers to
+	 * THAT dentry's sector: the file and stream entries may straddle a
+	 * sector boundary, so reusing the last-fetched sector for both
+	 * dcache_modify() calls could lose the file-entry update.
+	 */
+	__init_file_entry(sb, file_ep, type);
+	if (dcache_modify(sb, sector))
+		return -EIO;
+
+	strm_ep = (STRM_DENTRY_T *)get_dentry_in_dir(sb, p_dir, entry+1, &sector);
+	if (!strm_ep)
+		return -EIO;
+
+	__init_strm_entry(strm_ep, flags, start_clu, size);
+	if (dcache_modify(sb, sector))
+		return -EIO;
+
+	return 0;
+} /* end of exfat_init_dir_entry */
+
+/*
+ * Recompute and store the checksum of the dentry set whose file entry
+ * sits at `entry` in p_dir. Returns 0 on success, -EIO on failure.
+ */
+s32 update_dir_chksum(struct super_block *sb, CHAIN_T *p_dir, s32 entry)
+{
+	s32 ret = -EIO;
+	s32 i, num_entries;
+	u64 sector;
+	u16 chksum;
+	FILE_DENTRY_T *file_ep;
+	DENTRY_T *ep;
+
+	file_ep = (FILE_DENTRY_T *)get_dentry_in_dir(sb, p_dir, entry, &sector);
+	if (!file_ep)
+		return -EIO;
+
+	/* pin the sector so file_ep stays valid across the loop below */
+	dcache_lock(sb, sector);
+
+	/* set = file entry + num_ext following secondary entries */
+	num_entries = (s32) file_ep->num_ext + 1;
+	/* NOTE(review): CS_DIR_ENTRY presumably makes calc_chksum_2byte
+	 * skip the checksum field of the first entry — confirm in misc.c */
+	chksum = calc_chksum_2byte((void *) file_ep, DENTRY_SIZE, 0, CS_DIR_ENTRY);
+
+	for (i = 1; i < num_entries; i++) {
+		ep = get_dentry_in_dir(sb, p_dir, entry+i, NULL);
+		if (!ep)
+			goto out_unlock;
+
+		chksum = calc_chksum_2byte((void *) ep, DENTRY_SIZE, chksum, CS_DEFAULT);
+	}
+
+	file_ep->checksum = cpu_to_le16(chksum);
+	ret = dcache_modify(sb, sector);
+out_unlock:
+	dcache_unlock(sb, sector);
+	return ret;
+
+} /* end of update_dir_chksum */
+
+
+/*
+ * Fill in the extension parts of a new dentry set: num_ext in the file
+ * entry, name length/hash in the stream entry, and one name entry per
+ * 15 UTF-16 units of the name; finally refresh the set checksum.
+ * p_dosname is unused for exFAT. Returns 0 or -EIO.
+ */
+static s32 exfat_init_ext_entry(struct super_block *sb, CHAIN_T *p_dir, s32 entry, s32 num_entries,
+		UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname)
+{
+	s32 i;
+	u64 sector;
+	u16 *uniname = p_uniname->name;
+	FILE_DENTRY_T *file_ep;
+	STRM_DENTRY_T *strm_ep;
+	NAME_DENTRY_T *name_ep;
+
+	file_ep = (FILE_DENTRY_T *)get_dentry_in_dir(sb, p_dir, entry, &sector);
+	if (!file_ep)
+		return -EIO;
+
+	/* file entry records how many secondary entries follow */
+	file_ep->num_ext = (u8)(num_entries - 1);
+	/* dcache_modify() return values are not checked here (kept as-is) */
+	dcache_modify(sb, sector);
+
+	strm_ep = (STRM_DENTRY_T *)get_dentry_in_dir(sb, p_dir, entry+1, &sector);
+	if (!strm_ep)
+		return -EIO;
+
+	strm_ep->name_len = p_uniname->name_len;
+	strm_ep->name_hash = cpu_to_le16(p_uniname->name_hash);
+	dcache_modify(sb, sector);
+
+	/* one name entry per 15 UTF-16 units */
+	for (i = 2; i < num_entries; i++) {
+		name_ep = (NAME_DENTRY_T *)get_dentry_in_dir(sb, p_dir, entry+i, &sector);
+		if (!name_ep)
+			return -EIO;
+
+		__init_name_entry(name_ep, uniname);
+		dcache_modify(sb, sector);
+		uniname += 15;
+	}
+
+	/* seal the whole set with its checksum */
+	update_dir_chksum(sb, p_dir, entry);
+
+	return 0;
+} /* end of exfat_init_ext_entry */
+
+
+/*
+ * Mark dentries [entry+order, entry+num_entries) of p_dir as deleted.
+ * Returns 0 or -EIO.
+ */
+static s32 exfat_delete_dir_entry(struct super_block *sb, CHAIN_T *p_dir, s32 entry, s32 order, s32 num_entries)
+{
+	s32 idx;
+	u64 sector;
+	DENTRY_T *ep;
+
+	for (idx = order; idx < num_entries; idx++) {
+		ep = get_dentry_in_dir(sb, p_dir, entry + idx, &sector);
+		if (!ep)
+			return -EIO;
+
+		/* clears the in-use bit; the other on-disk bytes remain */
+		exfat_set_entry_type(ep, TYPE_DELETED);
+		if (dcache_modify(sb, sector))
+			return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Write @count cached dentries from entry set @es back to the device,
+ * starting at sector @sec, byte offset @off. The copy is performed per
+ * sector; when a cluster boundary is crossed the next sector comes either
+ * from a simple increment (alloc_flag 0x03 = contiguous chain) or from
+ * following the FAT. Returns 0 on success, -EIO on failure.
+ */
+static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
+ ENTRY_SET_CACHE_T *es, u64 sec, u32 off, u32 count)
+{
+ s32 num_entries;
+ u32 buf_off = (off - es->offset);
+ u32 remaining_byte_in_sector, copy_entries;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ u32 clu;
+ u8 *buf, *esbuf = (u8 *)&(es->__buf);
+
+ TMSG("%s entered\n", __func__);
+ MMSG("%s: es %p sec %llu off %u cnt %d\n", __func__, es, sec, off, count);
+ num_entries = count;
+
+ while (num_entries) {
+ /* write per sector base */
+ remaining_byte_in_sector = (1 << sb->s_blocksize_bits) - off;
+ copy_entries = min((s32)(remaining_byte_in_sector >> DENTRY_SIZE_BITS), num_entries);
+ buf = dcache_getblk(sb, sec);
+ if (!buf)
+ goto err_out;
+ MMSG("es->buf %p buf_off %u\n", esbuf, buf_off);
+ MMSG("copying %d entries from %p to sector %llu\n", copy_entries, (esbuf + buf_off), sec);
+ memcpy(buf + off, esbuf + buf_off, copy_entries << DENTRY_SIZE_BITS);
+ dcache_modify(sb, sec);
+ num_entries -= copy_entries;
+
+ if (num_entries) {
+ // get next sector
+ if (IS_LAST_SECT_IN_CLUS(fsi, sec)) {
+ clu = SECT_TO_CLUS(fsi, sec);
+ /* 0x03 means a contiguous (no-FAT) chain */
+ if (es->alloc_flag == 0x03)
+ clu++;
+ else if (get_next_clus_safe(sb, &clu))
+ goto err_out;
+ sec = CLUS_TO_SECT(fsi, clu);
+ } else {
+ sec++;
+ }
+ /* subsequent sectors are written from their start */
+ off = 0;
+ buf_off += copy_entries << DENTRY_SIZE_BITS;
+ }
+ }
+
+ TMSG("%s exited successfully\n", __func__);
+ return 0;
+err_out:
+ TMSG("%s failed\n", __func__);
+ return -EIO;
+}
+
+/* write back all entries in entry set */
+/* Convenience wrapper: flush all num_entries of @es starting at its
+ * recorded sector/offset. Returns 0 on success, -EIO on failure.
+ */
+static s32 __write_whole_entry_set(struct super_block *sb, ENTRY_SET_CACHE_T *es)
+{
+ return __write_partial_entries_in_entry_set(sb, es, es->sector, es->offset, es->num_entries);
+}
+
+/*
+ * Recalculate the checksum over every cached dentry in @es, store it in
+ * the leading file dentry, and write the whole set back to disk.
+ * The first dentry is summed with CS_DIR_ENTRY (which skips the checksum
+ * field itself), all following ones with CS_DEFAULT.
+ */
+s32 update_dir_chksum_with_entry_set(struct super_block *sb, ENTRY_SET_CACHE_T *es)
+{
+ s32 idx;
+ u16 csum = 0;
+ DENTRY_T *dent = (DENTRY_T *)&(es->__buf);
+
+ for (idx = 0; idx < es->num_entries; idx++, dent++) {
+ MMSG("%s %p\n", __func__, dent);
+ csum = calc_chksum_2byte((void *) dent, DENTRY_SIZE, csum,
+ idx ? CS_DEFAULT : CS_DIR_ENTRY);
+ }
+
+ ((FILE_DENTRY_T *)&(es->__buf))->checksum = cpu_to_le16(csum);
+ return __write_whole_entry_set(sb, es);
+}
+
+/* returns a set of dentries for a file or dir.
+ * Note that this is a copy (dump) of dentries so that user should call write_entry_set()
+ * to apply changes made in this entry set to the real device.
+ * in:
+ * sb+p_dir+entry: indicates a file/dir
+ * type: specifies how many dentries should be included.
+ * out:
+ * file_ep: will point the first dentry(= file dentry) on success
+ * return:
+ * pointer of entry set on success,
+ * NULL on failure.
+ */
+
+#define ES_MODE_STARTED 0
+#define ES_MODE_GET_FILE_ENTRY 1
+#define ES_MODE_GET_STRM_ENTRY 2
+#define ES_MODE_GET_NAME_ENTRY 3
+#define ES_MODE_GET_CRITICAL_SEC_ENTRY 4
+ENTRY_SET_CACHE_T *get_dentry_set_in_dir(struct super_block *sb,
+ CHAIN_T *p_dir, s32 entry, u32 type, DENTRY_T **file_ep)
+{
+ s32 ret;
+ u32 off, byte_offset, clu = 0;
+ u32 entry_type;
+ u64 sec;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ ENTRY_SET_CACHE_T *es = NULL;
+ DENTRY_T *ep, *pos;
+ u8 *buf;
+ u8 num_entries;
+ s32 mode = ES_MODE_STARTED;
+
+ /* FIXME : is available in error case? */
+ if (p_dir->dir == DIR_DELETED) {
+ EMSG("%s : access to deleted dentry\n", __func__);
+ BUG_ON(!fsi->prev_eio);
+ return NULL;
+ }
+
+ TMSG("%s entered\n", __func__);
+ MMSG("p_dir dir %u flags %x size %d\n", p_dir->dir, p_dir->flags, p_dir->size);
+ MMSG("entry %d type %d\n", entry, type);
+
+ /* translate the dentry index into cluster + sector + in-sector offset */
+ byte_offset = entry << DENTRY_SIZE_BITS;
+ ret = walk_fat_chain(sb, p_dir, byte_offset, &clu);
+ if (ret)
+ return NULL;
+
+ /* byte offset in cluster */
+ byte_offset &= fsi->cluster_size - 1;
+
+ /* byte offset in sector */
+ off = byte_offset & (u32)(sb->s_blocksize - 1);
+
+ /* sector offset in cluster */
+ sec = byte_offset >> (sb->s_blocksize_bits);
+ sec += CLUS_TO_SECT(fsi, clu);
+
+ buf = dcache_getblk(sb, sec);
+ if (!buf)
+ goto err_out;
+
+ ep = (DENTRY_T *)(buf + off);
+ entry_type = exfat_get_entry_type(ep);
+
+ /* the set must start at a file or directory dentry */
+ if ((entry_type != TYPE_FILE)
+ && (entry_type != TYPE_DIR))
+ goto err_out;
+
+ if (type == ES_ALL_ENTRIES)
+ num_entries = ((FILE_DENTRY_T *)ep)->num_ext+1;
+ else
+ num_entries = type;
+
+ MMSG("trying to malloc %lx bytes for %d entries\n",
+ (unsigned long)(offsetof(ENTRY_SET_CACHE_T, __buf) + (num_entries) * sizeof(DENTRY_T)), num_entries);
+ es = kmalloc((offsetof(ENTRY_SET_CACHE_T, __buf) + (num_entries) * sizeof(DENTRY_T)), GFP_KERNEL);
+ if (!es) {
+ EMSG("%s: failed to alloc entryset\n", __func__);
+ goto err_out;
+ }
+
+ es->num_entries = num_entries;
+ es->sector = sec;
+ es->offset = off;
+ es->alloc_flag = p_dir->flags;
+
+ pos = (DENTRY_T *) &(es->__buf);
+
+ while (num_entries) {
+ // instead of copying whole sector, we will check every entry.
+ // this will provide minimum stability and consistency.
+ entry_type = exfat_get_entry_type(ep);
+
+ if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED))
+ goto err_out;
+
+ /* state machine enforcing the on-disk entry order:
+  * FILE/DIR -> STREAM -> NAME... -> optional critical-secondary */
+ switch (mode) {
+ case ES_MODE_STARTED:
+ if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR))
+ mode = ES_MODE_GET_FILE_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_FILE_ENTRY:
+ if (entry_type == TYPE_STREAM)
+ mode = ES_MODE_GET_STRM_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_STRM_ENTRY:
+ if (entry_type == TYPE_EXTEND)
+ mode = ES_MODE_GET_NAME_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_NAME_ENTRY:
+ if (entry_type == TYPE_EXTEND)
+ break;
+ else if (entry_type == TYPE_STREAM)
+ goto err_out;
+ else if (entry_type & TYPE_CRITICAL_SEC)
+ mode = ES_MODE_GET_CRITICAL_SEC_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_CRITICAL_SEC_ENTRY:
+ if ((entry_type == TYPE_EXTEND) || (entry_type == TYPE_STREAM))
+ goto err_out;
+ else if ((entry_type & TYPE_CRITICAL_SEC) != TYPE_CRITICAL_SEC)
+ goto err_out;
+ break;
+ }
+
+ /* copy dentry */
+ memcpy(pos, ep, sizeof(DENTRY_T));
+
+ if (--num_entries == 0)
+ break;
+
+ /* detect wrap of the in-sector offset => advance to next sector */
+ if (((off + DENTRY_SIZE) & (u32)(sb->s_blocksize - 1)) <
+ (off & (u32)(sb->s_blocksize - 1))) {
+ // get the next sector
+ if (IS_LAST_SECT_IN_CLUS(fsi, sec)) {
+ if (es->alloc_flag == 0x03)
+ clu++;
+ else if (get_next_clus_safe(sb, &clu))
+ goto err_out;
+ sec = CLUS_TO_SECT(fsi, clu);
+ } else {
+ sec++;
+ }
+ buf = dcache_getblk(sb, sec);
+ if (!buf)
+ goto err_out;
+ off = 0;
+ ep = (DENTRY_T *)(buf);
+ } else {
+ ep++;
+ off += DENTRY_SIZE;
+ }
+ pos++;
+ }
+
+ if (file_ep)
+ *file_ep = (DENTRY_T *)&(es->__buf);
+
+ MMSG("es sec %llu offset %u flags %d, num_entries %u buf ptr %p\n",
+ es->sector, es->offset, es->alloc_flag, es->num_entries, &(es->__buf));
+ TMSG("%s exited %p\n", __func__, es);
+ return es;
+err_out:
+ TMSG("%s exited (return NULL) (es %p)\n", __func__, es);
+
+ /* kfree(NULL) is safe */
+ kfree(es);
+ es = NULL;
+ return NULL;
+}
+
+/*
+ * Free an entry-set dump obtained from get_dentry_set_in_dir().
+ * The previous "es = NULL;" only cleared the local parameter copy and had
+ * no effect for callers, so it has been removed as dead code.
+ */
+void release_dentry_set(ENTRY_SET_CACHE_T *es)
+{
+ TMSG("%s %p\n", __func__, es);
+
+ /* kfree(NULL) is safe */
+ kfree(es);
+}
+
+/*
+ * Copy up to 15 UTF-16 code units from a name dentry into @uniname.
+ * The output is always NUL-terminated (either by the stored terminator or
+ * by an appended one after a full entry). Returns the number of units
+ * copied. (@order is unused but kept for the shared call signature.)
+ */
+static s32 __extract_uni_name_from_name_entry(NAME_DENTRY_T *ep, u16 *uniname, s32 order)
+{
+ s32 cnt = 0;
+
+ while (cnt < 15) {
+ /* FIXME : unaligned? */
+ u16 c = le16_to_cpu(ep->unicode_0_14[cnt]);
+
+ *uniname = c;
+ if (c == 0x0)
+ return cnt;
+ uniname++;
+ cnt++;
+ }
+
+ *uniname = 0x0;
+ return cnt;
+
+} /* end of __extract_uni_name_from_name_entry */
+
+#define DIRENT_STEP_FILE (0)
+#define DIRENT_STEP_STRM (1)
+#define DIRENT_STEP_NAME (2)
+#define DIRENT_STEP_SECD (3)
+
+/* return values of exfat_find_dir_entry()
+ * >= 0 : return dir entry position with the name in dir
+ * -EEXIST : (root dir, ".") it is the root dir itself
+ * -ENOENT : entry with the name does not exist
+ * -EIO : I/O error
+ */
+static s32 exfat_find_dir_entry(struct super_block *sb, FILE_ID_T *fid,
+ CHAIN_T *p_dir, UNI_NAME_T *p_uniname, s32 num_entries, DOS_NAME_T *unused, u32 type)
+{
+ s32 i, rewind = 0, dentry = 0, end_eidx = 0, num_ext = 0, len;
+ s32 order, step, name_len;
+ s32 dentries_per_clu, num_empty = 0;
+ u32 entry_type;
+ u16 entry_uniname[16], *uniname = NULL, unichar;
+ CHAIN_T clu;
+ DENTRY_T *ep;
+ HINT_T *hint_stat = &fid->hint_stat;
+ HINT_FEMP_T candi_empty;
+ FILE_DENTRY_T *file_ep;
+ STRM_DENTRY_T *strm_ep;
+ NAME_DENTRY_T *name_ep;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ /*
+ * REMARK:
+ * DOT and DOTDOT are handled by VFS layer
+ */
+
+ if (IS_CLUS_FREE(p_dir->dir))
+ return -EIO;
+
+ dentries_per_clu = fsi->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ /* start scanning from the cached hint position when one exists;
+  * end_eidx remembers where to stop after a rewind */
+ if (hint_stat->eidx) {
+ clu.dir = hint_stat->clu;
+ dentry = hint_stat->eidx;
+ end_eidx = dentry;
+ }
+
+ candi_empty.eidx = -1;
+rewind:
+ order = 0;
+ step = DIRENT_STEP_FILE;
+ while (!IS_CLUS_EOF(clu.dir)) {
+ i = dentry & (dentries_per_clu - 1);
+ for (; i < dentries_per_clu; i++, dentry++) {
+ if (rewind && (dentry == end_eidx))
+ goto not_found;
+
+ ep = get_dentry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -EIO;
+
+ entry_type = exfat_get_entry_type(ep);
+
+ /* empty slots: track the first run long enough to hold
+  * num_entries as a creation hint (hint_femp) */
+ if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) {
+ step = DIRENT_STEP_FILE;
+
+ num_empty++;
+ if (candi_empty.eidx == -1) {
+ if (num_empty == 1) {
+ candi_empty.cur.dir = clu.dir;
+ candi_empty.cur.size = clu.size;
+ candi_empty.cur.flags = clu.flags;
+ }
+
+ if (num_empty >= num_entries) {
+ candi_empty.eidx = dentry - (num_empty - 1);
+ ASSERT(0 <= candi_empty.eidx);
+ candi_empty.count = num_empty;
+
+ if ((fid->hint_femp.eidx == -1) ||
+ (candi_empty.eidx <= fid->hint_femp.eidx)) {
+ memcpy(&fid->hint_femp,
+ &candi_empty,
+ sizeof(HINT_FEMP_T));
+ }
+ }
+ }
+
+ /* TYPE_UNUSED marks the end of the directory */
+ if (entry_type == TYPE_UNUSED)
+ goto not_found;
+ continue;
+ }
+
+ num_empty = 0;
+ candi_empty.eidx = -1;
+
+ /* file/dir dentry: candidate set starts here */
+ if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) {
+ step = DIRENT_STEP_FILE;
+ if ((type == TYPE_ALL) || (type == entry_type)) {
+ file_ep = (FILE_DENTRY_T *) ep;
+ num_ext = file_ep->num_ext;
+ step = DIRENT_STEP_STRM;
+ }
+ continue;
+ }
+
+ /* stream dentry: quick reject via name hash + length */
+ if (entry_type == TYPE_STREAM) {
+ if (step != DIRENT_STEP_STRM) {
+ step = DIRENT_STEP_FILE;
+ continue;
+ }
+ step = DIRENT_STEP_FILE;
+ strm_ep = (STRM_DENTRY_T *) ep;
+ if ((p_uniname->name_hash == le16_to_cpu(strm_ep->name_hash)) &&
+ (p_uniname->name_len == strm_ep->name_len)) {
+ step = DIRENT_STEP_NAME;
+ order = 1;
+ name_len = 0;
+ }
+ continue;
+ }
+
+ /* name dentries: compare 15 units at a time against the
+  * matching slice of the requested name */
+ if (entry_type == TYPE_EXTEND) {
+ if (step != DIRENT_STEP_NAME) {
+ step = DIRENT_STEP_FILE;
+ continue;
+ }
+ name_ep = (NAME_DENTRY_T *) ep;
+
+ if ((++order) == 2)
+ uniname = p_uniname->name;
+ else
+ uniname += 15;
+
+ len = __extract_uni_name_from_name_entry(name_ep, entry_uniname, order);
+ name_len += len;
+
+ /* temporarily NUL-terminate the slice for the compare */
+ unichar = *(uniname+len);
+ *(uniname+len) = 0x0;
+
+ if (nls_cmp_uniname(sb, uniname, entry_uniname)) {
+ step = DIRENT_STEP_FILE;
+ } else if (name_len == p_uniname->name_len) {
+ if (order == num_ext) {
+ //fid->hint_femp.eidx = -1;
+ goto found;
+ }
+ step = DIRENT_STEP_SECD;
+ }
+
+ *(uniname+len) = unichar;
+ continue;
+ }
+
+ /* remaining secondary entries after a full name match */
+ if (entry_type & (TYPE_CRITICAL_SEC | TYPE_BENIGN_SEC)) {
+ if (step == DIRENT_STEP_SECD) {
+ if (++order == num_ext)
+ goto found;
+ continue;
+ }
+ }
+ step = DIRENT_STEP_FILE;
+ }
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUS_EOF;
+ } else {
+ if (get_next_clus_safe(sb, &clu.dir))
+ return -EIO;
+ }
+ }
+
+not_found:
+ /* we started at a non-zero index, so we should try to find the
+ * target from index 0 up to the index we started at.
+ */
+ if (!rewind && end_eidx) {
+ rewind = 1;
+ dentry = 0;
+ clu.dir = p_dir->dir;
+ /* reset empty hint */
+ num_empty = 0;
+ candi_empty.eidx = -1;
+ goto rewind;
+ }
+
+ /* initialized hint_stat */
+ hint_stat->clu = p_dir->dir;
+ hint_stat->eidx = 0;
+ return -ENOENT;
+
+found:
+ /* next dentry we'll find is out of this cluster */
+ if (!((dentry + 1) & (dentries_per_clu-1))) {
+ int ret = 0;
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUS_EOF;
+ } else {
+ ret = get_next_clus_safe(sb, &clu.dir);
+ }
+
+ if (ret || IS_CLUS_EOF(clu.dir)) {
+ /* just initialized hint_stat */
+ hint_stat->clu = p_dir->dir;
+ hint_stat->eidx = 0;
+ return (dentry - num_ext);
+ }
+ }
+
+ hint_stat->clu = clu.dir;
+ hint_stat->eidx = dentry + 1;
+ return (dentry - num_ext);
+} /* end of exfat_find_dir_entry */
+
+/* returns -EIO on error */
+/*
+ * Count the secondary (stream/name) entries that belong to the file
+ * dentry @p_entry, stopping at the first entry of any other type.
+ * Returns the number counted, or -EIO when a dentry cannot be read.
+ */
+static s32 exfat_count_ext_entries(struct super_block *sb, CHAIN_T *p_dir, s32 entry, DENTRY_T *p_entry)
+{
+ s32 idx, counted = 0;
+ FILE_DENTRY_T *file_ep = (FILE_DENTRY_T *) p_entry;
+
+ for (idx = 1; idx <= file_ep->num_ext; idx++) {
+ DENTRY_T *ext_ep = get_dentry_in_dir(sb, p_dir, entry + idx, NULL);
+ u32 ent_type;
+
+ if (!ext_ep)
+ return -EIO;
+
+ ent_type = exfat_get_entry_type(ext_ep);
+ if ((ent_type != TYPE_EXTEND) && (ent_type != TYPE_STREAM))
+ break;
+ counted++;
+ }
+
+ return counted;
+} /* end of exfat_count_ext_entries */
+
+
+/*
+ * Name Conversion Functions
+ */
+/*
+ * Reassemble a file's full UTF-16 name from its name dentries into
+ * @uniname. Silently returns (leaving @uniname untouched) when the
+ * entry set cannot be read or holds fewer than 3 entries.
+ */
+static void exfat_get_uniname_from_ext_entry(struct super_block *sb, CHAIN_T *p_dir, s32 entry, u16 *uniname)
+{
+ s32 i;
+ DENTRY_T *ep;
+ ENTRY_SET_CACHE_T *es;
+
+ es = get_dentry_set_in_dir(sb, p_dir, entry, ES_ALL_ENTRIES, &ep);
+ if (!es)
+ return;
+
+ if (es->num_entries < 3)
+ goto out;
+
+ /* ep points into es->__buf; skip file + stream dentries */
+ ep += 2;
+
+ /*
+ * First entry : file entry
+ * Second entry : stream-extension entry
+ * Third entry : first file-name entry
+ * So, the index of first file-name dentry should start from 2.
+ */
+ for (i = 2; i < es->num_entries; i++, ep++) {
+ /* end of name entry */
+ if (exfat_get_entry_type(ep) != TYPE_EXTEND)
+ goto out;
+
+ __extract_uni_name_from_name_entry((NAME_DENTRY_T *)ep, uniname, i);
+ uniname += 15;
+ }
+
+out:
+ release_dentry_set(es);
+} /* end of exfat_get_uniname_from_ext_entry */
+
+/*
+ * Number of dentries needed to store a name: one file entry, one stream
+ * entry, plus one name entry per 15 UTF-16 units.
+ * Returns 0 for an empty name.
+ */
+static s32 exfat_calc_num_entries(UNI_NAME_T *p_uniname)
+{
+ s32 name_len = p_uniname->name_len;
+
+ if (!name_len)
+ return 0;
+
+ /* 1 file entry + 1 stream entry + name entries */
+ return ((name_len - 1) / 15) + 3;
+
+} /* end of exfat_calc_num_entries */
+
+/*
+ * Reject directory growth beyond the exFAT limit.
+ * The exFAT spec allows a dir to grow up to 8388608 (256MB of) dentries;
+ * returns 0 while under the limit, -ENOSPC once it is reached.
+ */
+static s32 exfat_check_max_dentries(FILE_ID_T *fid)
+{
+ if ((fid->size >> DENTRY_SIZE_BITS) < MAX_EXFAT_DENTRIES)
+ return 0;
+
+ return -ENOSPC;
+} /* end of check_max_dentries */
+
+/*
+ * Allocation Bitmap Management Functions
+ */
+/*
+ * Locate the allocation-bitmap dentry in the root directory, validate its
+ * size against the cluster count, and read every bitmap sector into
+ * fsi->vol_amap (an array of buffer_head pointers).
+ * Returns 0 on success; -EIO on read/size errors, -ENOMEM on allocation
+ * failure, -EINVAL when no bitmap dentry exists.
+ */
+s32 load_alloc_bmp(struct super_block *sb)
+{
+ s32 ret;
+ u32 i, j, map_size, type, need_map_size;
+ u64 sector;
+ CHAIN_T clu;
+ BMAP_DENTRY_T *ep;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ clu.dir = fsi->root_dir;
+ clu.flags = 0x01;
+
+ while (!IS_CLUS_EOF(clu.dir)) {
+ for (i = 0; i < fsi->dentries_per_clu; i++) {
+ ep = (BMAP_DENTRY_T *) get_dentry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -EIO;
+
+ type = exfat_get_entry_type((DENTRY_T *) ep);
+
+ if (type == TYPE_UNUSED)
+ break;
+ if (type != TYPE_BITMAP)
+ continue;
+
+ /* flags 0x0 selects the first allocation bitmap */
+ if (ep->flags == 0x0) {
+ fsi->map_clu = le32_to_cpu(ep->start_clu);
+ map_size = (u32) le64_to_cpu(ep->size);
+
+ /* one bit per data cluster, rounded up to bytes */
+ need_map_size = (((fsi->num_clusters - CLUS_BASE) - 1) >> 3) + 1;
+ if (need_map_size != map_size) {
+ sdfat_log_msg(sb, KERN_ERR,
+ "bogus allocation bitmap size(need : %u, cur : %u)",
+ need_map_size, map_size);
+ /* Only allowed when bogus allocation bitmap size is large */
+ if (need_map_size > map_size)
+ return -EIO;
+ }
+ fsi->map_sectors = ((need_map_size - 1) >> (sb->s_blocksize_bits)) + 1;
+ fsi->vol_amap =
+ kmalloc((sizeof(struct buffer_head *) * fsi->map_sectors), GFP_KERNEL);
+ if (!fsi->vol_amap)
+ return -ENOMEM;
+
+ sector = CLUS_TO_SECT(fsi, fsi->map_clu);
+
+ for (j = 0; j < fsi->map_sectors; j++) {
+ fsi->vol_amap[j] = NULL;
+ ret = read_sect(sb, sector+j, &(fsi->vol_amap[j]), 1);
+ if (ret) {
+ /* release all buffers and free vol_amap */
+ i = 0;
+ while (i < j)
+ brelse(fsi->vol_amap[i++]);
+
+ /* kfree(NULL) is safe */
+ kfree(fsi->vol_amap);
+ fsi->vol_amap = NULL;
+ return ret;
+ }
+ }
+
+ fsi->pbr_bh = NULL;
+ return 0;
+ }
+ }
+
+ if (get_next_clus_safe(sb, &clu.dir))
+ return -EIO;
+ }
+
+ return -EINVAL;
+} /* end of load_alloc_bmp */
+
+/* Release the cached PBR buffer and every allocation-bitmap sector
+ * buffer, then free the buffer-head pointer array itself.
+ */
+void free_alloc_bmp(struct super_block *sb)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ s32 idx;
+
+ brelse(fsi->pbr_bh);
+
+ for (idx = 0; idx < fsi->map_sectors; idx++)
+ __brelse(fsi->vol_amap[idx]);
+
+ /* kfree(NULL) is safe */
+ kfree(fsi->vol_amap);
+ fsi->vol_amap = NULL;
+}
+
+/* WARN :
+ * If the value of "clu" is 0, it means cluster 2 which is
+ * the first cluster of cluster heap.
+ */
+/* WARN :
+ * If the value of "clu" is 0, it means cluster 2 which is
+ * the first cluster of cluster heap.
+ * Sets the cluster's bit in the in-memory bitmap and writes the
+ * containing bitmap sector back to the device.
+ */
+static s32 set_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ s32 sect_i = clu >> (sb->s_blocksize_bits + 3);
+ s32 bit_i = clu & (u32)((sb->s_blocksize << 3) - 1);
+
+ bitmap_set((unsigned long *)(fsi->vol_amap[sect_i]->b_data), bit_i, 1);
+
+ return write_sect(sb, CLUS_TO_SECT(fsi, fsi->map_clu) + sect_i,
+ fsi->vol_amap[sect_i], 0);
+} /* end of set_alloc_bitmap */
+
+/* WARN :
+ * If the value of "clu" is 0, it means cluster 2 which is
+ * the first cluster of cluster heap.
+ */
+/* WARN :
+ * If the value of "clu" is 0, it means cluster 2 which is
+ * the first cluster of cluster heap.
+ * Clears the cluster's bit, writes the bitmap sector back, and — when
+ * mounted with "discard" — issues a discard for the freed cluster.
+ */
+static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ struct sdfat_mount_options *opts = &sbi->options;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ s32 sect_i = clu >> (sb->s_blocksize_bits + 3);
+ s32 bit_i = clu & (u32)((sb->s_blocksize << 3) - 1);
+ s32 ret;
+
+ bitmap_clear((unsigned long *)(fsi->vol_amap[sect_i]->b_data), bit_i, 1);
+
+ ret = write_sect(sb, CLUS_TO_SECT(fsi, fsi->map_clu) + sect_i,
+ fsi->vol_amap[sect_i], 0);
+
+ if (opts->discard) {
+ s32 ret_discard;
+
+ TMSG("discard cluster(%08x)\n", clu+2);
+ ret_discard = sb_issue_discard(sb, CLUS_TO_SECT(fsi, clu+2),
+ (1 << fsi->sect_per_clus_bits), GFP_NOFS, 0);
+
+ if (ret_discard == -EOPNOTSUPP) {
+ sdfat_msg(sb, KERN_ERR,
+ "discard not supported by device, disabling");
+ opts->discard = 0;
+ }
+ }
+
+ return ret;
+} /* end of clr_alloc_bitmap */
+
+/* WARN :
+ * If the value of "clu" is 0, it means cluster 2 which is
+ * the first cluster of cluster heap.
+ */
+/* WARN :
+ * If the value of "clu" is 0, it means cluster 2 which is
+ * the first cluster of cluster heap.
+ * Searches the allocation bitmap, starting at @clu, for the first free
+ * cluster, wrapping around to the beginning of the bitmap when the end
+ * is reached. Returns the free cluster number (heap-relative + 2), or
+ * CLUS_EOF when no free cluster exists.
+ */
+static u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ u32 i, map_i, map_b;
+ u32 clu_base, clu_free;
+ u8 k, clu_mask;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ clu_base = (clu & ~(0x7)) + 2;
+ /* mask off bits below the starting cluster in the first byte */
+ clu_mask = (1 << (clu - clu_base + 2)) - 1;
+
+ map_i = clu >> (sb->s_blocksize_bits + 3);
+ map_b = (clu >> 3) & (u32)(sb->s_blocksize - 1);
+
+ /* scan one bitmap byte (8 clusters) per iteration */
+ for (i = 2; i < fsi->num_clusters; i += 8) {
+ k = *(((u8 *) fsi->vol_amap[map_i]->b_data) + map_b);
+ if (clu_mask > 0) {
+ k |= clu_mask;
+ clu_mask = 0;
+ }
+ /* k < 0xFF means at least one clear (free) bit in this byte */
+ if (k < 0xFF) {
+ clu_free = clu_base + free_bit[k];
+ if (clu_free < fsi->num_clusters)
+ return clu_free;
+ }
+ clu_base += 8;
+
+ /* advance to the next byte/sector, wrapping to the start */
+ if (((++map_b) >= (u32)sb->s_blocksize) ||
+ (clu_base >= fsi->num_clusters)) {
+ if ((++map_i) >= fsi->map_sectors) {
+ clu_base = 2;
+ map_i = 0;
+ }
+ map_b = 0;
+ }
+ }
+
+ return CLUS_EOF;
+} /* end of test_alloc_bitmap */
+
+/* Synchronously flush every dirty allocation-bitmap buffer to disk;
+ * a no-op when the bitmap has not been loaded.
+ */
+void sync_alloc_bmp(struct super_block *sb)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ s32 idx;
+
+ if (!fsi->vol_amap)
+ return;
+
+ for (idx = 0; idx < fsi->map_sectors; idx++)
+ sync_dirty_buffer(fsi->vol_amap[idx]);
+}
+
+/* Link @len clusters starting at @chain into a sequential FAT chain and
+ * terminate the last one with EOF. A zero-length chain is a no-op.
+ * Returns 0 on success, -EIO when any FAT update fails.
+ */
+static s32 exfat_chain_cont_cluster(struct super_block *sb, u32 chain, u32 len)
+{
+ u32 cur;
+
+ if (!len)
+ return 0;
+
+ for (cur = chain; len > 1; cur++, len--) {
+ if (fat_ent_set(sb, cur, cur + 1))
+ return -EIO;
+ }
+
+ if (fat_ent_set(sb, cur, CLUS_EOF))
+ return -EIO;
+ return 0;
+}
+
+/* Public (non-static) wrapper around exfat_chain_cont_cluster(). */
+s32 chain_cont_cluster(struct super_block *sb, u32 chain, u32 len)
+{
+ return exfat_chain_cont_cluster(sb, chain, len);
+}
+
+
+/*
+ * Free every cluster of @p_chain: clear its allocation-bitmap bit and,
+ * when @do_relse is set, drop the dentry-cache entries of its sectors.
+ * Contiguous chains (flags 0x03) are walked by increment for exactly
+ * p_chain->size clusters; FAT chains are followed until EOF.
+ * fsi->used_clusters is decremented by the number actually freed, even
+ * on a partial failure. Returns 0 on success, -EIO otherwise.
+ */
+static s32 exfat_free_cluster(struct super_block *sb, CHAIN_T *p_chain, s32 do_relse)
+{
+ s32 ret = -EIO;
+ u32 num_clusters = 0;
+ u32 clu;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ s32 i;
+ u64 sector;
+
+ /* invalid cluster number */
+ if (IS_CLUS_FREE(p_chain->dir) || IS_CLUS_EOF(p_chain->dir))
+ return 0;
+
+ /* no cluster to truncate */
+ if (p_chain->size == 0) {
+ DMSG("%s: cluster(%u) truncation is not required.",
+ __func__, p_chain->dir);
+ return 0;
+ }
+
+ /* check cluster validation */
+ if (!is_valid_clus(fsi, p_chain->dir)) {
+ EMSG("%s: invalid start cluster (%u)\n", __func__, p_chain->dir);
+ sdfat_debug_bug_on(1);
+ return -EIO;
+ }
+
+ set_sb_dirty(sb);
+ clu = p_chain->dir;
+
+ if (p_chain->flags == 0x03) {
+ /* contiguous chain: consecutive clusters, no FAT walk */
+ do {
+ if (do_relse) {
+ sector = CLUS_TO_SECT(fsi, clu);
+ for (i = 0; i < fsi->sect_per_clus; i++) {
+ if (dcache_release(sb, sector+i) == -EIO)
+ goto out;
+ }
+ }
+
+ if (clr_alloc_bitmap(sb, clu-2))
+ goto out;
+ clu++;
+
+ num_clusters++;
+ } while (num_clusters < p_chain->size);
+ } else {
+ /* FAT chain: follow links until EOF */
+ do {
+ if (do_relse) {
+ sector = CLUS_TO_SECT(fsi, clu);
+ for (i = 0; i < fsi->sect_per_clus; i++) {
+ if (dcache_release(sb, sector+i) == -EIO)
+ goto out;
+ }
+ }
+
+ if (clr_alloc_bitmap(sb, (clu - CLUS_BASE)))
+ goto out;
+
+ if (get_next_clus_safe(sb, &clu))
+ goto out;
+
+ num_clusters++;
+ } while (!IS_CLUS_EOF(clu));
+ }
+
+ /* success */
+ ret = 0;
+out:
+
+ fsi->used_clusters -= num_clusters;
+ return ret;
+} /* end of exfat_free_cluster */
+
+/*
+ * Allocate @num_alloc clusters and append them to @p_chain.
+ * Starts searching at p_chain->dir (or fsi->clu_srch_ptr when the chain
+ * is empty). A chain stays contiguous (flags 0x03) as long as each new
+ * cluster directly follows the previous one; on the first gap the chain
+ * is converted to a FAT chain via exfat_chain_cont_cluster().
+ * On any failure all clusters allocated so far are rolled back through
+ * exfat_free_cluster(). Returns 0 on success, -ENOSPC or -EIO otherwise.
+ */
+static s32 exfat_alloc_cluster(struct super_block *sb, u32 num_alloc, CHAIN_T *p_chain, s32 dest)
+{
+ s32 ret = -ENOSPC;
+ u32 num_clusters = 0, total_cnt;
+ u32 hint_clu, new_clu, last_clu = CLUS_EOF;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ total_cnt = fsi->num_clusters - CLUS_BASE;
+
+ if (unlikely(total_cnt < fsi->used_clusters)) {
+ sdfat_fs_error_ratelimit(sb,
+ "%s: invalid used clusters(t:%u,u:%u)\n",
+ __func__, total_cnt, fsi->used_clusters);
+ return -EIO;
+ }
+
+ if (num_alloc > total_cnt - fsi->used_clusters)
+ return -ENOSPC;
+
+ hint_clu = p_chain->dir;
+ /* find new cluster */
+ if (IS_CLUS_EOF(hint_clu)) {
+ if (fsi->clu_srch_ptr < CLUS_BASE) {
+ EMSG("%s: fsi->clu_srch_ptr is invalid (%u)\n",
+ __func__, fsi->clu_srch_ptr);
+ ASSERT(0);
+ fsi->clu_srch_ptr = CLUS_BASE;
+ }
+
+ hint_clu = test_alloc_bitmap(sb, fsi->clu_srch_ptr - CLUS_BASE);
+ if (IS_CLUS_EOF(hint_clu))
+ return -ENOSPC;
+ }
+
+ /* check cluster validation */
+ if (!is_valid_clus(fsi, hint_clu)) {
+ /* "last + 1" can be passed as hint_clu. Otherwise, bug_on */
+ if (hint_clu != fsi->num_clusters) {
+ EMSG("%s: hint_cluster is invalid (%u)\n",
+ __func__, hint_clu);
+ sdfat_debug_bug_on(1);
+ }
+ hint_clu = CLUS_BASE;
+ /* hint wrapped: a contiguous chain can no longer stay
+  * contiguous, so materialize it in the FAT */
+ if (p_chain->flags == 0x03) {
+ if (exfat_chain_cont_cluster(sb, p_chain->dir, num_clusters))
+ return -EIO;
+ p_chain->flags = 0x01;
+ }
+ }
+
+ set_sb_dirty(sb);
+
+ p_chain->dir = CLUS_EOF;
+
+ while ((new_clu = test_alloc_bitmap(sb, hint_clu - CLUS_BASE)) != CLUS_EOF) {
+ /* non-adjacent cluster breaks contiguity */
+ if ((new_clu != hint_clu) && (p_chain->flags == 0x03)) {
+ if (exfat_chain_cont_cluster(sb, p_chain->dir, num_clusters)) {
+ ret = -EIO;
+ goto error;
+ }
+ p_chain->flags = 0x01;
+ }
+
+ /* update allocation bitmap */
+ if (set_alloc_bitmap(sb, new_clu - CLUS_BASE)) {
+ ret = -EIO;
+ goto error;
+ }
+
+ num_clusters++;
+
+ /* update FAT table */
+ if (p_chain->flags == 0x01) {
+ if (fat_ent_set(sb, new_clu, CLUS_EOF)) {
+ ret = -EIO;
+ goto error;
+ }
+ }
+
+ if (IS_CLUS_EOF(p_chain->dir)) {
+ p_chain->dir = new_clu;
+ } else if (p_chain->flags == 0x01) {
+ if (fat_ent_set(sb, last_clu, new_clu)) {
+ ret = -EIO;
+ goto error;
+ }
+ }
+ last_clu = new_clu;
+
+ if ((--num_alloc) == 0) {
+ fsi->clu_srch_ptr = hint_clu;
+ fsi->used_clusters += num_clusters;
+
+ p_chain->size += num_clusters;
+ return 0;
+ }
+
+ hint_clu = new_clu + 1;
+ if (hint_clu >= fsi->num_clusters) {
+ hint_clu = CLUS_BASE;
+
+ if (p_chain->flags == 0x03) {
+ if (exfat_chain_cont_cluster(sb, p_chain->dir, num_clusters)) {
+ ret = -EIO;
+ goto error;
+ }
+ p_chain->flags = 0x01;
+ }
+ }
+ }
+error:
+ /* roll back everything allocated in this call */
+ if (num_clusters)
+ exfat_free_cluster(sb, p_chain, 0);
+ return ret;
+} /* end of exfat_alloc_cluster */
+
+/*
+ * Count set bits in the allocation bitmap (= used clusters) into
+ * @ret_count. The last bitmap byte may cover bits past the final
+ * cluster, so the result is clamped to the data-cluster count.
+ * Always returns 0.
+ */
+static s32 exfat_count_used_clusters(struct super_block *sb, u32 *ret_count)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ u32 total_clus = fsi->num_clusters - 2;
+ u32 used = 0, done;
+ u32 sect_i = 0, byte_i = 0;
+
+ for (done = 0; done < total_clus; done += 8) {
+ u8 bits = *(((u8 *) fsi->vol_amap[sect_i]->b_data) + byte_i);
+
+ used += used_bit[bits];
+ if (++byte_i >= (u32)sb->s_blocksize) {
+ sect_i++;
+ byte_i = 0;
+ }
+ }
+
+ /* FIXME : abnormal bitmap count should be handled as more smart */
+ if (total_clus < used)
+ used = total_clus;
+
+ *ret_count = used;
+ return 0;
+} /* end of exfat_count_used_clusters */
+
+
+/*
+ * File Operation Functions
+ */
+/* exFAT variant of the per-volume filesystem operation table; installed
+ * into fsi->fs_func by mount_exfat().
+ */
+static FS_FUNC_T exfat_fs_func = {
+ .alloc_cluster = exfat_alloc_cluster,
+ .free_cluster = exfat_free_cluster,
+ .count_used_clusters = exfat_count_used_clusters,
+
+ .init_dir_entry = exfat_init_dir_entry,
+ .init_ext_entry = exfat_init_ext_entry,
+ .find_dir_entry = exfat_find_dir_entry,
+ .delete_dir_entry = exfat_delete_dir_entry,
+ .get_uniname_from_ext_entry = exfat_get_uniname_from_ext_entry,
+ .count_ext_entries = exfat_count_ext_entries,
+ .calc_num_entries = exfat_calc_num_entries,
+ .check_max_dentries = exfat_check_max_dentries,
+
+ .get_entry_type = exfat_get_entry_type,
+ .set_entry_type = exfat_set_entry_type,
+ .get_entry_attr = exfat_get_entry_attr,
+ .set_entry_attr = exfat_set_entry_attr,
+ .get_entry_flag = exfat_get_entry_flag,
+ .set_entry_flag = exfat_set_entry_flag,
+ .get_entry_clu0 = exfat_get_entry_clu0,
+ .set_entry_clu0 = exfat_set_entry_clu0,
+ .get_entry_size = exfat_get_entry_size,
+ .set_entry_size = exfat_set_entry_size,
+ .get_entry_time = exfat_get_entry_time,
+ .set_entry_time = exfat_set_entry_time,
+};
+
+/*
+ * Parse an exFAT boot sector (@p_pbr) into the in-memory FS_INFO_T,
+ * validate the basic geometry fields, and install the exFAT function
+ * table. Returns 0 on success, -EINVAL for bogus boot-sector values.
+ */
+s32 mount_exfat(struct super_block *sb, pbr_t *p_pbr)
+{
+ pbr64_t *p_bpb = (pbr64_t *)p_pbr;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ fsi->sect_per_clus = 1 << p_bpb->bsx.sect_per_clus_bits;
+ fsi->sect_per_clus_bits = p_bpb->bsx.sect_per_clus_bits;
+ fsi->cluster_size_bits = fsi->sect_per_clus_bits + sb->s_blocksize_bits;
+ fsi->cluster_size = 1 << fsi->cluster_size_bits;
+
+ if (!p_bpb->bsx.num_fats) {
+ sdfat_msg(sb, KERN_ERR, "bogus number of FAT structure");
+ return -EINVAL;
+ }
+
+ /* only the first FAT is used; extra FATs are tolerated with a warning */
+ if (p_bpb->bsx.num_fats >= 2) {
+ sdfat_msg(sb, KERN_WARNING,
+ "unsupported number of FAT structure :%u, try with 1",
+ p_bpb->bsx.num_fats);
+ }
+
+ fsi->num_FAT_sectors = le32_to_cpu(p_bpb->bsx.fat_length);
+ if (!fsi->num_FAT_sectors) {
+ sdfat_msg(sb, KERN_ERR, "bogus fat size");
+ return -EINVAL;
+ }
+
+ fsi->FAT1_start_sector = le32_to_cpu(p_bpb->bsx.fat_offset);
+ fsi->FAT2_start_sector = fsi->FAT1_start_sector;
+
+ fsi->root_start_sector = le32_to_cpu(p_bpb->bsx.clu_offset);
+ fsi->data_start_sector = fsi->root_start_sector;
+
+ fsi->num_sectors = le64_to_cpu(p_bpb->bsx.vol_length);
+ if (!fsi->num_sectors) {
+ sdfat_msg(sb, KERN_ERR, "bogus number of total sector count");
+ return -EINVAL;
+ }
+
+ /* because the cluster index starts with 2 */
+ fsi->num_clusters = le32_to_cpu(p_bpb->bsx.clu_count) + CLUS_BASE;
+
+ fsi->vol_id = le32_to_cpu(p_bpb->bsx.vol_serial);
+ fsi->root_dir = le32_to_cpu(p_bpb->bsx.root_cluster);
+ fsi->dentries_in_root = 0;
+ fsi->dentries_per_clu = 1 << (fsi->cluster_size_bits - DENTRY_SIZE_BITS);
+ fsi->vol_flag = (u32) le16_to_cpu(p_bpb->bsx.vol_flags);
+ fsi->clu_srch_ptr = CLUS_BASE;
+ /* sentinel: real count is computed lazily via count_used_clusters */
+ fsi->used_clusters = (u32) ~0;
+
+ fsi->fs_func = &exfat_fs_func;
+ fsi->vol_type = EXFAT;
+ fat_ent_ops_init(sb);
+
+ if (p_bpb->bsx.vol_flags & VOL_DIRTY) {
+ fsi->vol_flag |= VOL_DIRTY;
+ sdfat_log_msg(sb, KERN_WARNING, "Volume was not properly "
+ "unmounted. Some data may be corrupt. "
+ "Please run fsck.");
+ }
+
+ return 0;
+} /* end of mount_exfat */
+
+/* end of core_exfat.c */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : core_fat.c */
+/* PURPOSE : FAT-fs core code for sdFAT */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+
+#include "sdfat.h"
+#include "core.h"
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+/*----------------------------------------------------------------------*/
+/* Constant & Macro Definitions */
+/*----------------------------------------------------------------------*/
+#define MAX_LFN_ORDER (20)
+
+/*
+ * MAX_EST_AU_SECT should be changed according to 32/64bits.
+ * On 32bit, 4KB page supports 512 clusters per AU.
+ * But, on 64bit, 4KB page can handle a half of total list_head of 32bit's.
+ * Because the size of the list_head structure on 64bit is twice that of 32bit.
+ */
+#if (BITS_PER_LONG == 64)
+//#define MAX_EST_AU_SECT (16384) /* upto 8MB */
+#define MAX_EST_AU_SECT (32768) /* upto 16MB, used more page for list_head */
+#else
+#define MAX_EST_AU_SECT (32768) /* upto 16MB */
+#endif
+
+/*======================================================================*/
+/* Local Function Declarations */
+/*======================================================================*/
+static s32 __extract_uni_name_from_ext_entry(EXT_DENTRY_T *, u16 *, s32);
+
+/*----------------------------------------------------------------------*/
+/* Global Variable Definitions */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* Local Variable Definitions */
+/*----------------------------------------------------------------------*/
+
+/*======================================================================*/
+/* Local Function Definitions */
+/*======================================================================*/
+/*
+ * Pick a default allocation-unit (AU) size, in 512-byte sectors, for a
+ * block device: roughly disk_size/2 in KB, rounded to 4/8/16 MB and
+ * capped at MAX_EST_AU_SECT. The queue's discard_granularity can
+ * override the estimate, and the result is rescaled when the logical
+ * block size is not 512 bytes.
+ */
+static u32 __calc_default_au_size(struct super_block *sb)
+{
+ struct block_device *bdev = sb->s_bdev;
+ struct gendisk *disk;
+ struct request_queue *queue;
+ struct queue_limits *limit;
+ unsigned int est_au_sect = MAX_EST_AU_SECT;
+ unsigned int est_au_size = 0;
+ unsigned int queue_au_size = 0;
+ sector_t total_sect = 0;
+
+ /* we assumed that sector size is 512 bytes */
+
+ disk = bdev->bd_disk;
+ if (!disk)
+ goto out;
+
+ queue = disk->queue;
+ if (!queue)
+ goto out;
+
+ limit = &queue->limits;
+ queue_au_size = limit->discard_granularity;
+
+ /* estimate function(x) =
+ * (total_sect / 2) * 512 / 1024
+ * => (total_sect >> 1) >> 1)
+ * => (total_sect >> 2)
+ * => estimated bytes size
+ *
+ * ex1) <= 8GB -> 4MB
+ * ex2) 16GB -> 8MB
+ * ex3) >= 32GB -> 16MB
+ */
+ total_sect = disk->part0.nr_sects;
+ est_au_size = total_sect >> 2;
+
+ /* au_size assumed that bytes per sector is 512 */
+ est_au_sect = est_au_size >> 9;
+
+ MMSG("DBG1: total_sect(%llu) est_au_size(%u) est_au_sect(%u)\n",
+ (u64)total_sect, est_au_size, est_au_sect);
+
+ /* round the estimate up to the nearest supported AU size */
+ if (est_au_sect <= 8192) {
+ /* 4MB */
+ est_au_sect = 8192;
+ } else if (est_au_sect <= 16384) {
+ /* 8MB */
+ est_au_sect = 16384;
+ } else {
+ /* 8MB or 16MB */
+ est_au_sect = MAX_EST_AU_SECT;
+ }
+
+ MMSG("DBG2: total_sect(%llu) est_au_size(%u) est_au_sect(%u)\n",
+ (u64)total_sect, est_au_size, est_au_sect);
+
+ /* prefer the device's discard granularity when it is larger but
+  * still within the supported maximum */
+ if (est_au_size < queue_au_size &&
+ queue_au_size <= (MAX_EST_AU_SECT << 9)) {
+ DMSG("use queue_au_size(%u) instead of est_au_size(%u)\n",
+ queue_au_size, est_au_size);
+ est_au_sect = queue_au_size >> 9;
+ }
+
+out:
+ if (sb->s_blocksize != 512) {
+ ASSERT(sb->s_blocksize_bits > 9);
+ sdfat_log_msg(sb, KERN_INFO,
+ "adjustment est_au_size by logical block size(%lu)",
+ sb->s_blocksize);
+ est_au_sect >>= (sb->s_blocksize_bits - 9);
+ }
+
+ sdfat_log_msg(sb, KERN_INFO, "set default AU sectors : %u "
+ "(queue_au_size : %u KB, disk_size : %llu MB)",
+ est_au_sect, queue_au_size >> 10, (u64)(total_sect >> 11));
+ return est_au_sect;
+}
+
+
+/*
+ * Cluster Management Functions
+ */
+static s32 fat_free_cluster(struct super_block *sb, CHAIN_T *p_chain, s32 do_relse)
+{
+ s32 ret = -EIO;
+ s32 num_clusters = 0;
+ u32 clu, prev;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ s32 i;
+ u64 sector;
+
+ /* invalid cluster number */
+ if (IS_CLUS_FREE(p_chain->dir) || IS_CLUS_EOF(p_chain->dir))
+ return 0;
+
+ /* no cluster to truncate */
+ if (!p_chain->size) {
+ DMSG("%s: cluster(%u) truncation is not required.",
+ __func__, p_chain->dir);
+ return 0;
+ }
+
+ /* check cluster validation */
+ if (!is_valid_clus(fsi, p_chain->dir)) {
+ EMSG("%s: invalid start cluster (%u)\n", __func__, p_chain->dir);
+ sdfat_debug_bug_on(1);
+ return -EIO;
+ }
+
+
+ set_sb_dirty(sb);
+ clu = p_chain->dir;
+
+ do {
+ if (do_relse) {
+ sector = CLUS_TO_SECT(fsi, clu);
+ for (i = 0; i < fsi->sect_per_clus; i++) {
+ if (dcache_release(sb, sector+i) == -EIO)
+ goto out;
+ }
+ }
+
+ prev = clu;
+ if (get_next_clus_safe(sb, &clu)) {
+ /* print more helpful log */
+ if (IS_CLUS_BAD(clu)) {
+ sdfat_log_msg(sb, KERN_ERR, "%s : "
+ "deleting bad cluster (clu[%u]->BAD)",
+ __func__, prev);
+ } else if (IS_CLUS_FREE(clu)) {
+ sdfat_log_msg(sb, KERN_ERR, "%s : "
+ "deleting free cluster (clu[%u]->FREE)",
+ __func__, prev);
+ }
+ goto out;
+ }
+
+ /* Free FAT chain */
+ if (fat_ent_set(sb, prev, CLUS_FREE))
+ goto out;
+
+ /* Update AMAP if needed */
+ if (fsi->amap) {
+ if (amap_release_cluster(sb, prev))
+ return -EIO;
+ }
+
+ num_clusters++;
+
+ } while (!IS_CLUS_EOF(clu));
+
+ /* success */
+ ret = 0;
+out:
+ fsi->used_clusters -= num_clusters;
+ return ret;
+} /* end of fat_free_cluster */
+
/*
 * Allocate num_alloc free clusters and link them into one FAT chain.
 *
 * The search starts at p_chain->dir when valid, otherwise at the
 * filesystem's search pointer, and wraps over the whole cluster space.
 * On success p_chain->dir holds the head of the new chain, the search
 * pointer and used_clusters are updated, and 0 is returned. On failure
 * any partially built chain is rolled back and -ENOSPC/-EIO is returned.
 * (The "dest" argument is not used by this FAT implementation.)
 */
static s32 fat_alloc_cluster(struct super_block *sb, u32 num_alloc, CHAIN_T *p_chain, s32 dest)
{
	s32 ret = -ENOSPC;
	u32 i, num_clusters = 0, total_cnt;
	u32 new_clu, last_clu = CLUS_EOF, read_clu;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	total_cnt = fsi->num_clusters - CLUS_BASE;

	/* corrupted accounting: more clusters used than exist */
	if (unlikely(total_cnt < fsi->used_clusters)) {
		sdfat_fs_error_ratelimit(sb,
			"%s : invalid used clusters(t:%u,u:%u)\n",
			__func__, total_cnt, fsi->used_clusters);
		return -EIO;
	}

	if (num_alloc > total_cnt - fsi->used_clusters)
		return -ENOSPC;

	/* choose the starting point of the free-cluster search */
	new_clu = p_chain->dir;
	if (IS_CLUS_EOF(new_clu))
		new_clu = fsi->clu_srch_ptr;
	else if (new_clu >= fsi->num_clusters)
		new_clu = CLUS_BASE;

	set_sb_dirty(sb);

	p_chain->dir = CLUS_EOF;

	/* visit at most every cluster once */
	for (i = CLUS_BASE; i < fsi->num_clusters; i++) {
		if (fat_ent_get(sb, new_clu, &read_clu)) {
			ret = -EIO;
			goto error;
		}

		if (IS_CLUS_FREE(read_clu)) {
			/* claim the cluster by terminating it with EOF */
			if (fat_ent_set(sb, new_clu, CLUS_EOF)) {
				ret = -EIO;
				goto error;
			}
			num_clusters++;

			/* first cluster becomes the chain head; otherwise
			 * link it behind the previously claimed cluster */
			if (IS_CLUS_EOF(p_chain->dir)) {
				p_chain->dir = new_clu;
			} else {
				if (fat_ent_set(sb, last_clu, new_clu)) {
					ret = -EIO;
					goto error;
				}
			}

			last_clu = new_clu;

			if ((--num_alloc) == 0) {
				fsi->clu_srch_ptr = new_clu;
				fsi->used_clusters += num_clusters;

				return 0;
			}
		}
		/* wrap around the end of the cluster space */
		if ((++new_clu) >= fsi->num_clusters)
			new_clu = CLUS_BASE;
	}
error:
	/* roll back the partially allocated chain */
	if (num_clusters)
		fat_free_cluster(sb, p_chain, 0);
	return ret;
} /* end of fat_alloc_cluster */
+
+static s32 fat_count_used_clusters(struct super_block *sb, u32 *ret_count)
+{
+ s32 i;
+ u32 clu, count = 0;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ for (i = CLUS_BASE; i < fsi->num_clusters; i++) {
+ if (fat_ent_get(sb, i, &clu))
+ return -EIO;
+
+ if (!IS_CLUS_FREE(clu))
+ count++;
+ }
+
+ *ret_count = count;
+ return 0;
+} /* end of fat_count_used_clusters */
+
+
+/*
+ * Directory Entry Management Functions
+ */
+static u32 fat_get_entry_type(DENTRY_T *p_entry)
+{
+ DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
+
+ /* first byte of 32bytes dummy */
+ if (*(ep->name) == MSDOS_UNUSED)
+ return TYPE_UNUSED;
+
+ /* 0xE5 of Kanji Japanese is replaced to 0x05 */
+ else if (*(ep->name) == MSDOS_DELETED)
+ return TYPE_DELETED;
+
+ /* 11th byte of 32bytes dummy */
+ else if ((ep->attr & ATTR_EXTEND_MASK) == ATTR_EXTEND)
+ return TYPE_EXTEND;
+
+ else if (!(ep->attr & (ATTR_SUBDIR | ATTR_VOLUME)))
+ return TYPE_FILE;
+
+ else if ((ep->attr & (ATTR_SUBDIR | ATTR_VOLUME)) == ATTR_SUBDIR)
+ return TYPE_DIR;
+
+ else if ((ep->attr & (ATTR_SUBDIR | ATTR_VOLUME)) == ATTR_VOLUME)
+ return TYPE_VOLUME;
+
+ return TYPE_INVALID;
+} /* end of fat_get_entry_type */
+
+static void fat_set_entry_type(DENTRY_T *p_entry, u32 type)
+{
+ DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
+
+ if (type == TYPE_UNUSED)
+ *(ep->name) = MSDOS_UNUSED; /* 0x0 */
+
+ else if (type == TYPE_DELETED)
+ *(ep->name) = MSDOS_DELETED; /* 0xE5 */
+
+ else if (type == TYPE_EXTEND)
+ ep->attr = ATTR_EXTEND;
+
+ else if (type == TYPE_DIR)
+ ep->attr = ATTR_SUBDIR;
+
+ else if (type == TYPE_FILE)
+ ep->attr = ATTR_ARCHIVE;
+
+ else if (type == TYPE_SYMLINK)
+ ep->attr = ATTR_ARCHIVE | ATTR_SYMLINK;
+} /* end of fat_set_entry_type */
+
+static u32 fat_get_entry_attr(DENTRY_T *p_entry)
+{
+ DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
+
+ return (u32)ep->attr;
+} /* end of fat_get_entry_attr */
+
+static void fat_set_entry_attr(DENTRY_T *p_entry, u32 attr)
+{
+ DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
+
+ ep->attr = (u8)attr;
+} /* end of fat_set_entry_attr */
+
static u8 fat_get_entry_flag(DENTRY_T *p_entry)
{
	/* FAT dentries have no per-entry allocation-flag field; a fixed
	 * value is reported so callers of the FS_FUNC_T interface work. */
	return 0x01;
} /* end of fat_get_entry_flag */
+
static void fat_set_entry_flag(DENTRY_T *p_entry, u8 flags)
{
	/* Intentional no-op: FAT directory entries carry no allocation
	 * flag, but the FS_FUNC_T interface requires a setter. */
} /* end of fat_set_entry_flag */
+
+static u32 fat_get_entry_clu0(DENTRY_T *p_entry)
+{
+ DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
+ /* FIXME : is ok? */
+ return(((u32)(le16_to_cpu(ep->start_clu_hi)) << 16) | le16_to_cpu(ep->start_clu_lo));
+} /* end of fat_get_entry_clu0 */
+
+static void fat_set_entry_clu0(DENTRY_T *p_entry, u32 start_clu)
+{
+ DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
+
+ ep->start_clu_lo = cpu_to_le16(CLUSTER_16(start_clu));
+ ep->start_clu_hi = cpu_to_le16(CLUSTER_16(start_clu >> 16));
+} /* end of fat_set_entry_clu0 */
+
+static u64 fat_get_entry_size(DENTRY_T *p_entry)
+{
+ DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
+
+ return (u64)le32_to_cpu(ep->size);
+} /* end of fat_get_entry_size */
+
+static void fat_set_entry_size(DENTRY_T *p_entry, u64 size)
+{
+ DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
+
+ ep->size = cpu_to_le32((u32)size);
+} /* end of fat_set_entry_size */
+
+static void fat_get_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode)
+{
+ u16 t = 0x00, d = 0x21;
+ DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry;
+
+ switch (mode) {
+ case TM_CREATE:
+ t = le16_to_cpu(ep->create_time);
+ d = le16_to_cpu(ep->create_date);
+ break;
+ case TM_MODIFY:
+ t = le16_to_cpu(ep->modify_time);
+ d = le16_to_cpu(ep->modify_date);
+ break;
+ }
+
+ tp->tz.value = 0x00;
+ tp->sec = (t & 0x001F) << 1;
+ tp->min = (t >> 5) & 0x003F;
+ tp->hour = (t >> 11);
+ tp->day = (d & 0x001F);
+ tp->mon = (d >> 5) & 0x000F;
+ tp->year = (d >> 9);
+} /* end of fat_get_entry_time */
+
+static void fat_set_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode)
+{
+ u16 t, d;
+ DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry;
+
+ t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
+ d = (tp->year << 9) | (tp->mon << 5) | tp->day;
+
+ switch (mode) {
+ case TM_CREATE:
+ ep->create_time = cpu_to_le16(t);
+ ep->create_date = cpu_to_le16(d);
+ break;
+ case TM_MODIFY:
+ ep->modify_time = cpu_to_le16(t);
+ ep->modify_date = cpu_to_le16(d);
+ break;
+ }
+} /* end of fat_set_entry_time */
+
+static void __init_dos_entry(struct super_block *sb, DOS_DENTRY_T *ep, u32 type, u32 start_clu)
+{
+ TIMESTAMP_T tm, *tp;
+
+ fat_set_entry_type((DENTRY_T *) ep, type);
+ ep->start_clu_lo = cpu_to_le16(CLUSTER_16(start_clu));
+ ep->start_clu_hi = cpu_to_le16(CLUSTER_16(start_clu >> 16));
+ ep->size = 0;
+
+ tp = tm_now_sb(sb, &tm);
+ fat_set_entry_time((DENTRY_T *) ep, tp, TM_CREATE);
+ fat_set_entry_time((DENTRY_T *) ep, tp, TM_MODIFY);
+ ep->access_date = 0;
+ ep->create_time_ms = 0;
+} /* end of __init_dos_entry */
+
+static void __init_ext_entry(EXT_DENTRY_T *ep, s32 order, u8 chksum, u16 *uniname)
+{
+ s32 i;
+ u8 end = false;
+
+ fat_set_entry_type((DENTRY_T *) ep, TYPE_EXTEND);
+ ep->order = (u8) order;
+ ep->sysid = 0;
+ ep->checksum = chksum;
+ ep->start_clu = 0;
+
+ /* unaligned name */
+ for (i = 0; i < 5; i++) {
+ if (!end) {
+ put_unaligned_le16(*uniname, &(ep->unicode_0_4[i<<1]));
+ if (*uniname == 0x0)
+ end = true;
+ else
+ uniname++;
+ } else {
+ put_unaligned_le16(0xFFFF, &(ep->unicode_0_4[i<<1]));
+ }
+ }
+
+ /* aligned name */
+ for (i = 0; i < 6; i++) {
+ if (!end) {
+ ep->unicode_5_10[i] = cpu_to_le16(*uniname);
+ if (*uniname == 0x0)
+ end = true;
+ else
+ uniname++;
+ } else {
+ ep->unicode_5_10[i] = cpu_to_le16(0xFFFF);
+ }
+ }
+
+ /* aligned name */
+ for (i = 0; i < 2; i++) {
+ if (!end) {
+ ep->unicode_11_12[i] = cpu_to_le16(*uniname);
+ if (*uniname == 0x0)
+ end = true;
+ else
+ uniname++;
+ } else {
+ ep->unicode_11_12[i] = cpu_to_le16(0xFFFF);
+ }
+ }
+} /* end of __init_ext_entry */
+
+static s32 fat_init_dir_entry(struct super_block *sb, CHAIN_T *p_dir, s32 entry, u32 type,
+ u32 start_clu, u64 size)
+{
+ u64 sector;
+ DOS_DENTRY_T *dos_ep;
+
+ dos_ep = (DOS_DENTRY_T *) get_dentry_in_dir(sb, p_dir, entry, §or);
+ if (!dos_ep)
+ return -EIO;
+
+ __init_dos_entry(sb, dos_ep, type, start_clu);
+ dcache_modify(sb, sector);
+
+ return 0;
+} /* end of fat_init_dir_entry */
+
/*
 * Write a complete directory-entry set: the short-name (8.3) entry at
 * index "entry" plus the (num_entries - 1) extended LFN entries that
 * precede it, each holding 13 UTF-16 units of p_uniname. Returns 0 on
 * success, -EIO on any lookup or write-back failure.
 */
static s32 fat_init_ext_entry(struct super_block *sb, CHAIN_T *p_dir, s32 entry, s32 num_entries,
	UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname)
{
	s32 i;
	u64 sector;
	u8 chksum;
	u16 *uniname = p_uniname->name;
	DOS_DENTRY_T *dos_ep;
	EXT_DENTRY_T *ext_ep;

	dos_ep = (DOS_DENTRY_T *) get_dentry_in_dir(sb, p_dir, entry, &sector);
	if (!dos_ep)
		return -EIO;

	dos_ep->lcase = p_dosname->name_case;
	memcpy(dos_ep->name, p_dosname->name, DOS_NAME_LENGTH);
	if (dcache_modify(sb, sector))
		return -EIO;

	if ((--num_entries) > 0) {
		/* the checksum over the 8.3 name ties every LFN entry to
		 * its short-name entry */
		chksum = calc_chksum_1byte((void *) dos_ep->name, DOS_NAME_LENGTH, 0);

		/* LFN entries sit at decreasing indices before the SFN entry */
		for (i = 1; i < num_entries; i++) {
			ext_ep = (EXT_DENTRY_T *) get_dentry_in_dir(sb, p_dir, entry-i, &sector);
			if (!ext_ep)
				return -EIO;

			__init_ext_entry(ext_ep, i, chksum, uniname);
			if (dcache_modify(sb, sector))
				return -EIO;
			uniname += 13;	/* each LFN entry holds 13 UTF-16 units */
		}

		/* the final LFN entry carries MSDOS_LAST_LFN in its order */
		ext_ep = (EXT_DENTRY_T *) get_dentry_in_dir(sb, p_dir, entry-i, &sector);
		if (!ext_ep)
			return -EIO;

		__init_ext_entry(ext_ep, i+MSDOS_LAST_LFN, chksum, uniname);
		if (dcache_modify(sb, sector))
			return -EIO;
	}

	return 0;
} /* end of fat_init_ext_entry */
+
+static s32 fat_delete_dir_entry(struct super_block *sb, CHAIN_T *p_dir, s32 entry, s32 order, s32 num_entries)
+{
+ s32 i;
+ u64 sector;
+ DENTRY_T *ep;
+
+ for (i = num_entries-1; i >= order; i--) {
+ ep = get_dentry_in_dir(sb, p_dir, entry-i, §or);
+ if (!ep)
+ return -EIO;
+
+ fat_set_entry_type(ep, TYPE_DELETED);
+ if (dcache_modify(sb, sector))
+ return -EIO;
+ }
+
+ return 0;
+}
+
/* return values of fat_find_dir_entry()
 * >= 0 : returns the directory entry position of the name in dir
 * -EEXIST : (root dir, ".") it is the root dir itself
 * -ENOENT : entry with the name does not exist
 * -EIO : I/O error
 */
+static inline s32 __get_dentries_per_clu(FS_INFO_T *fsi, s32 clu)
+{
+ if (IS_CLUS_FREE(clu)) /* FAT16 root_dir */
+ return fsi->dentries_in_root;
+
+ return fsi->dentries_per_clu;
+}
+
/*
 * Search p_dir for the name given by p_uniname (long name) or p_dosname
 * (8.3 short name), resuming from fid->hint_stat when available and
 * wrapping around once so entries before the hint are also covered.
 *
 * While scanning, any run of at least num_entries free slots is recorded
 * in fid->hint_femp so a subsequent create can reuse it. (The "type"
 * argument is not used by this FAT implementation.)
 *
 * Returns the matching entry index, -ENOENT when absent, -EIO on error.
 */
static s32 fat_find_dir_entry(struct super_block *sb, FILE_ID_T *fid,
	CHAIN_T *p_dir, UNI_NAME_T *p_uniname, s32 num_entries, DOS_NAME_T *p_dosname, u32 type)
{
	s32 i, rewind = 0, dentry = 0, end_eidx = 0;
	s32 chksum = 0, lfn_ord = 0, lfn_len = 0;
	s32 dentries_per_clu, num_empty = 0;
	u32 entry_type;
	u16 entry_uniname[14], *uniname = NULL;
	CHAIN_T clu;
	DENTRY_T *ep;
	HINT_T *hint_stat = &fid->hint_stat;
	HINT_FEMP_T candi_empty;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	/*
	 * REMARK:
	 * DOT and DOTDOT are handled by VFS layer
	 */

	dentries_per_clu = __get_dentries_per_clu(fsi, p_dir->dir);
	clu.dir = p_dir->dir;
	clu.flags = p_dir->flags;

	/* resume from the hinted position if one is recorded */
	if (hint_stat->eidx) {
		clu.dir = hint_stat->clu;
		dentry = hint_stat->eidx;
		end_eidx = dentry;
	}

	candi_empty.eidx = -1;

	MMSG("lookup dir= %s\n", p_dosname->name);
rewind:
	while (!IS_CLUS_EOF(clu.dir)) {
		i = dentry % dentries_per_clu;
		for (; i < dentries_per_clu; i++, dentry++) {
			/* second pass reached the point where we started */
			if (rewind && (dentry == end_eidx))
				goto not_found;

			ep = get_dentry_in_dir(sb, &clu, i, NULL);
			if (!ep)
				return -EIO;

			entry_type = fat_get_entry_type(ep);

			/*
			 * Most directory entries have long name,
			 * So, we check extend directory entry first.
			 */
			if (entry_type == TYPE_EXTEND) {
				EXT_DENTRY_T *ext_ep = (EXT_DENTRY_T *)ep;
				u32 cur_ord = (u32)ext_ep->order;
				u32 cur_chksum = (s32)ext_ep->checksum;
				s32 len = 13;
				u16 unichar;

				/* an LFN entry ends any free-slot run */
				num_empty = 0;
				candi_empty.eidx = -1;

				/* check whether new lfn or not */
				if (cur_ord & MSDOS_LAST_LFN) {
					cur_ord &= ~(MSDOS_LAST_LFN);
					chksum = cur_chksum;
					len = (13 * (cur_ord-1));
					uniname = (p_uniname->name + len);
					lfn_ord = cur_ord + 1;
					lfn_len = 0;

					/* check minimum name length */
					if (cur_ord &&
						(len > p_uniname->name_len)) {
						/* MISMATCHED NAME LENGTH */
						lfn_len = -1;
					}
					len = 0;
				}

				/* invalid lfn order */
				if (!cur_ord || (cur_ord > MAX_LFN_ORDER) ||
					((cur_ord + 1) != lfn_ord))
					goto reset_dentry_set;

				/* check checksum of directory entry set */
				if (cur_chksum != chksum)
					goto reset_dentry_set;

				/* update order for next dentry */
				lfn_ord = cur_ord;

				/* check whether mismatched lfn or not */
				if (lfn_len == -1) {
					/* MISMATCHED LFN DENTRY SET */
					continue;
				}

				if (!uniname) {
					sdfat_fs_error(sb,
						"%s : abnormal dentry "
						"(start_clu[%u], "
						"idx[%u])", __func__,
						p_dir->dir, dentry);
					sdfat_debug_bug_on(1);
					return -EIO;
				}

				/* update position of name buffer */
				uniname -= len;

				/* get utf16 characters saved on this entry */
				len = __extract_uni_name_from_ext_entry(ext_ep, entry_uniname, lfn_ord);

				/* replace last char to null */
				unichar = *(uniname+len);
				*(uniname+len) = (u16)0x0;

				/* compare the lookup name against this
				 * entry's 13-unit slice */
				if (nls_cmp_uniname(sb, uniname, entry_uniname)) {
					/* DO HANDLE WRONG NAME */
					lfn_len = -1;
				} else {
					/* add matched chars length */
					lfn_len += len;
				}

				/* restore previous character */
				*(uniname+len) = unichar;

				/* jump to check next dentry */
				continue;

			} else if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) {
				DOS_DENTRY_T *dos_ep = (DOS_DENTRY_T *)ep;
				u32 cur_chksum = (s32)calc_chksum_1byte(
							(void *) dos_ep->name,
							DOS_NAME_LENGTH, 0);

				num_empty = 0;
				candi_empty.eidx = -1;

				MMSG("checking dir= %c%c%c%c%c%c%c%c%c%c%c\n",
					dos_ep->name[0], dos_ep->name[1],
					dos_ep->name[2], dos_ep->name[3],
					dos_ep->name[4], dos_ep->name[5],
					dos_ep->name[6], dos_ep->name[7],
					dos_ep->name[8], dos_ep->name[9],
					dos_ep->name[10]);

				/*
				 * if there is no valid long filename,
				 * we should check short filename.
				 */
				if (!lfn_len || (cur_chksum != chksum)) {
					/* check shortname */
					if ((p_dosname->name[0] != '\0') &&
						!nls_cmp_sfn(sb,
							p_dosname->name,
							dos_ep->name)) {
						goto found;
					}
				/* check name length */
				} else if ((lfn_len > 0) &&
						((s32)p_uniname->name_len ==
						lfn_len)) {
					goto found;
				}

				/* DO HANDLE MISMATCHED SFN, FALL THROUGH */
			} else if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) {
				num_empty++;
				if (candi_empty.eidx == -1) {
					if (num_empty == 1) {
						candi_empty.cur.dir = clu.dir;
						/* NOTE(review): clu.size is never
						 * initialized in this function before
						 * being copied here -- verify against
						 * upstream sdfat */
						candi_empty.cur.size = clu.size;
						candi_empty.cur.flags = clu.flags;
					}

					if (num_empty >= num_entries) {
						candi_empty.eidx = dentry - (num_empty - 1);
						ASSERT(0 <= candi_empty.eidx);
						candi_empty.count = num_empty;

						/* keep the earliest big-enough run */
						if ((fid->hint_femp.eidx == -1) ||
							(candi_empty.eidx <= fid->hint_femp.eidx)) {
							memcpy(&fid->hint_femp,
								&candi_empty,
								sizeof(HINT_FEMP_T));
						}
					}
				}

				/* an UNUSED entry terminates the directory */
				if (entry_type == TYPE_UNUSED)
					goto not_found;
				/* FALL THROUGH */
			}
reset_dentry_set:
			/* TYPE_DELETED, TYPE_VOLUME OR MISMATCHED SFN */
			lfn_ord = 0;
			lfn_len = 0;
			chksum = 0;
		}

		if (IS_CLUS_FREE(p_dir->dir))
			break; /* FAT16 root_dir */

		if (get_next_clus_safe(sb, &clu.dir))
			return -EIO;
	}

not_found:
	/* we started at not 0 index,so we should try to find target
	 * from 0 index to the index we started at.
	 */
	if (!rewind && end_eidx) {
		rewind = 1;
		dentry = 0;
		clu.dir = p_dir->dir;
		/* reset dentry set */
		lfn_ord = 0;
		lfn_len = 0;
		chksum = 0;
		/* reset empty hint_*/
		num_empty = 0;
		candi_empty.eidx = -1;
		goto rewind;
	}

	/* initialized hint_stat */
	hint_stat->clu = p_dir->dir;
	hint_stat->eidx = 0;
	return -ENOENT;

found:
	/* next dentry we'll find is out of this cluster */
	if (!((dentry + 1) % dentries_per_clu)) {
		int ret = 0;
		/* FAT16 root_dir */
		if (IS_CLUS_FREE(p_dir->dir))
			clu.dir = CLUS_EOF;
		else
			ret = get_next_clus_safe(sb, &clu.dir);

		if (ret || IS_CLUS_EOF(clu.dir)) {
			/* just initialized hint_stat */
			hint_stat->clu = p_dir->dir;
			hint_stat->eidx = 0;
			return dentry;
		}
	}

	hint_stat->clu = clu.dir;
	hint_stat->eidx = dentry + 1;
	return dentry;
} /* end of fat_find_dir_entry */
+
+/* returns -EIO on error */
+static s32 fat_count_ext_entries(struct super_block *sb, CHAIN_T *p_dir, s32 entry, DENTRY_T *p_entry)
+{
+ s32 count = 0;
+ u8 chksum;
+ DOS_DENTRY_T *dos_ep = (DOS_DENTRY_T *) p_entry;
+ EXT_DENTRY_T *ext_ep;
+
+ chksum = calc_chksum_1byte((void *) dos_ep->name, DOS_NAME_LENGTH, 0);
+
+ for (entry--; entry >= 0; entry--) {
+ ext_ep = (EXT_DENTRY_T *)get_dentry_in_dir(sb, p_dir, entry, NULL);
+ if (!ext_ep)
+ return -EIO;
+
+ if ((fat_get_entry_type((DENTRY_T *)ext_ep) == TYPE_EXTEND) &&
+ (ext_ep->checksum == chksum)) {
+ count++;
+ if (ext_ep->order > MSDOS_LAST_LFN)
+ return count;
+ } else {
+ return count;
+ }
+ }
+
+ return count;
+}
+
+
+/*
+ * Name Conversion Functions
+ */
/*
 * Copy the up-to-13 UTF-16 code units stored in one extended (LFN)
 * entry into uniname. Copying stops at a NUL; the output is always
 * NUL-terminated. Returns the number of units copied (terminator not
 * counted).
 *
 * NOTE(review): for order >= 20 only 4 units of unicode_5_10 are taken
 * and unicode_11_12 is skipped — presumably a cap tied to the maximum
 * name length; confirm the arithmetic against the FAT LFN spec.
 */
static s32 __extract_uni_name_from_ext_entry(EXT_DENTRY_T *ep, u16 *uniname, s32 order)
{
	s32 i, len = 0;

	/* unicode_0_4 is read byte-wise via unaligned accessors */
	for (i = 0; i < 5; i++) {
		*uniname = get_unaligned_le16(&(ep->unicode_0_4[i<<1]));
		if (*uniname == 0x0)
			return len;
		uniname++;
		len++;
	}

	if (order < 20) {
		for (i = 0; i < 6; i++) {
			/* FIXME : unaligned? */
			*uniname = le16_to_cpu(ep->unicode_5_10[i]);
			if (*uniname == 0x0)
				return len;
			uniname++;
			len++;
		}
	} else {
		for (i = 0; i < 4; i++) {
			/* FIXME : unaligned? */
			*uniname = le16_to_cpu(ep->unicode_5_10[i]);
			if (*uniname == 0x0)
				return len;
			uniname++;
			len++;
		}
		*uniname = 0x0; /* uniname[MAX_NAME_LENGTH] */
		return len;
	}

	for (i = 0; i < 2; i++) {
		/* FIXME : unaligned? */
		*uniname = le16_to_cpu(ep->unicode_11_12[i]);
		if (*uniname == 0x0)
			return len;
		uniname++;
		len++;
	}

	*uniname = 0x0;
	return len;

} /* end of __extract_uni_name_from_ext_entry */
+
/*
 * Rebuild the long file name of the short-name entry at index "entry"
 * by walking its preceding extended entries. On any inconsistency
 * (missing entry, wrong type, checksum or sequence mismatch) uniname
 * is set to the empty string.
 */
static void fat_get_uniname_from_ext_entry(struct super_block *sb, CHAIN_T *p_dir, s32 entry, u16 *uniname)
{
	u32 i;
	u16 *name = uniname;
	u32 chksum;

	DOS_DENTRY_T *dos_ep =
		(DOS_DENTRY_T *)get_dentry_in_dir(sb, p_dir, entry, NULL);

	if (unlikely(!dos_ep))
		goto invalid_lfn;

	/* every LFN entry must carry the checksum of the 8.3 name */
	chksum = (u32)calc_chksum_1byte(
				(void *) dos_ep->name,
				DOS_NAME_LENGTH, 0);

	for (entry--, i = 1; entry >= 0; entry--, i++) {
		EXT_DENTRY_T *ep;

		ep = (EXT_DENTRY_T *)get_dentry_in_dir(sb, p_dir, entry, NULL);
		if (!ep)
			goto invalid_lfn;

		if (fat_get_entry_type((DENTRY_T *) ep) != TYPE_EXTEND)
			goto invalid_lfn;

		if (chksum != (u32)ep->checksum)
			goto invalid_lfn;

		/* sequence numbers must be consecutive, starting at 1 */
		if (i != (u32)(ep->order & ~(MSDOS_LAST_LFN)))
			goto invalid_lfn;

		__extract_uni_name_from_ext_entry(ep, name, (s32)i);
		if (ep->order & MSDOS_LAST_LFN)
			return;

		name += 13;	/* 13 UTF-16 units per LFN entry */
	}
invalid_lfn:
	*uniname = (u16)0x0;
} /* end of fat_get_uniname_from_ext_entry */
+
+/* Find if the shortname exists
+ * and check if there are free entries
+ */
static s32 __fat_find_shortname_entry(struct super_block *sb, CHAIN_T *p_dir,
	u8 *p_dosname, s32 *offset, __attribute__((unused))int n_entry_needed)
{
	/* Returns 0 when p_dosname exists in p_dir (its dir-relative index
	 * in *offset if offset is non-NULL), -ENOENT when absent, -EIO on
	 * a read failure. n_entry_needed is currently unused. */
	u32 type;
	s32 i, dentry = 0;
	s32 dentries_per_clu;
	DENTRY_T *ep = NULL;
	DOS_DENTRY_T *dos_ep = NULL;
	CHAIN_T clu = *p_dir;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	if (offset)
		*offset = -1;

	if (IS_CLUS_FREE(clu.dir)) /* FAT16 root_dir */
		dentries_per_clu = fsi->dentries_in_root;
	else
		dentries_per_clu = fsi->dentries_per_clu;

	/* scan every dentry of every cluster of the directory */
	while (!IS_CLUS_EOF(clu.dir)) {
		for (i = 0; i < dentries_per_clu; i++, dentry++) {
			ep = get_dentry_in_dir(sb, &clu, i, NULL);
			if (!ep)
				return -EIO;

			type = fat_get_entry_type(ep);

			/* only real file/dir entries carry an 8.3 name */
			if ((type == TYPE_FILE) || (type == TYPE_DIR)) {
				dos_ep = (DOS_DENTRY_T *)ep;
				if (!nls_cmp_sfn(sb, p_dosname, dos_ep->name)) {
					if (offset)
						*offset = dentry;
					return 0;
				}
			}
		}

		/* fat12/16 root dir */
		if (IS_CLUS_FREE(clu.dir))
			break;

		if (get_next_clus_safe(sb, &clu.dir))
			return -EIO;
	}
	return -ENOENT;
}
+
+#ifdef CONFIG_SDFAT_FAT32_SHORTNAME_SEQ
/*
 * Append a "~N" tail to the 8.3 base name in place (e.g. "NAMEI_~1"),
 * truncating the base so the tail fits within the 8-character field.
 */
static void __fat_attach_count_to_dos_name(u8 *dosname, s32 count)
{
	s32 i, j, length;
	s8 str_count[6];

	snprintf(str_count, sizeof(str_count), "~%d", count);
	length = strlen(str_count);

	/* find the splice position; bytes with the high bit set are
	 * presumably DBCS lead bytes and skipped in pairs — TODO confirm */
	i = j = 0;
	while (j <= (8 - length)) {
		i = j;
		if (dosname[j] == ' ')
			break;
		if (dosname[j] & 0x80)
			j += 2;
		else
			j++;
	}

	for (j = 0; j < length; i++, j++)
		dosname[i] = (u8) str_count[j];

	/* presumably clears a trailing partial DBCS byte — TODO confirm */
	if (i == 7)
		dosname[7] = ' ';

} /* end of __fat_attach_count_to_dos_name */
+#endif
+
/*
 * Generate a unique 8.3 short name for a new long-named file in p_dir.
 *
 * With CONFIG_SDFAT_FAT32_SHORTNAME_SEQ, "BASE~1".."BASE~9" tails are
 * tried first; otherwise (or when all are taken) a pseudo-random
 * "XXXX~N" tail derived from jiffies is probed until a free name is
 * found. Returns 0 on success or a negative errno from the lookup.
 */
s32 fat_generate_dos_name_new(struct super_block *sb, CHAIN_T *p_dir, DOS_NAME_T *p_dosname, s32 n_entry_needed)
{
	s32 i;
	s32 baselen, err;
	u8 work[DOS_NAME_LENGTH], buf[5];	/* buf: "%04X" + NUL */
	u8 tail;

	baselen = 8;
	memset(work, ' ', DOS_NAME_LENGTH);
	memcpy(work, p_dosname->name, DOS_NAME_LENGTH);

	/* trim trailing spaces off the 8-character base name */
	while (baselen && (work[--baselen] == ' ')) {
		/* DO NOTHING, JUST FOR CHECK_PATCH */
	}

	/* keep room for the "~N" tail */
	if (baselen > 6)
		baselen = 6;

	BUG_ON(baselen < 0);

#ifdef CONFIG_SDFAT_FAT32_SHORTNAME_SEQ
	/* example) namei_exfat.c -> NAMEI_~1 - NAMEI_~9 */
	work[baselen] = '~';
	for (i = 1; i < 10; i++) {
		// '0' + i = 1 ~ 9 ASCII
		work[baselen + 1] = '0' + i;
		err = __fat_find_shortname_entry(sb, p_dir, work, NULL, n_entry_needed);
		if (err == -ENOENT) {
			/* void return */
			__fat_attach_count_to_dos_name(p_dosname->name, i);
			return 0;
		}

		/* any other error */
		if (err)
			return err;
	}
#endif

	/* sequential tails exhausted: derive a 4-hex-digit pseudo-random
	 * base from jiffies and probe until a free name turns up */
	i = jiffies;
	tail = (jiffies >> 16) & 0x7;

	if (baselen > 2)
		baselen = 2;

	BUG_ON(baselen < 0);

	work[baselen + 4] = '~';
	// 1 ~ 8 ASCII
	work[baselen + 5] = '1' + tail;
	while (1) {
		snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
		memcpy(&work[baselen], buf, 4);
		err = __fat_find_shortname_entry(sb, p_dir, work, NULL, n_entry_needed);
		if (err == -ENOENT) {
			memcpy(p_dosname->name, work, DOS_NAME_LENGTH);
			break;
		}

		/* any other error */
		if (err)
			return err;

		i -= 11;	/* step to the next candidate value */
	}
	return 0;
} /* end of generate_dos_name_new */
+
+static s32 fat_calc_num_entries(UNI_NAME_T *p_uniname)
+{
+ s32 len;
+
+ len = p_uniname->name_len;
+ if (len == 0)
+ return 0;
+
+ /* 1 dos name entry + extended entries */
+ return((len-1) / 13 + 2);
+
+} /* end of calc_num_enties */
+
+static s32 fat_check_max_dentries(FILE_ID_T *fid)
+{
+ if ((fid->size >> DENTRY_SIZE_BITS) >= MAX_FAT_DENTRIES) {
+ /* FAT spec allows a dir to grow upto 65536 dentries */
+ return -ENOSPC;
+ }
+ return 0;
+} /* end of check_max_dentries */
+
+
+/*
+ * File Operation Functions
+ */
/* Dispatch table of FAT12/16/32 operations (default allocator). */
static FS_FUNC_T fat_fs_func = {
	.alloc_cluster = fat_alloc_cluster,
	.free_cluster = fat_free_cluster,
	.count_used_clusters = fat_count_used_clusters,

	.init_dir_entry = fat_init_dir_entry,
	.init_ext_entry = fat_init_ext_entry,
	.find_dir_entry = fat_find_dir_entry,
	.delete_dir_entry = fat_delete_dir_entry,
	.get_uniname_from_ext_entry = fat_get_uniname_from_ext_entry,
	.count_ext_entries = fat_count_ext_entries,
	.calc_num_entries = fat_calc_num_entries,
	.check_max_dentries = fat_check_max_dentries,

	.get_entry_type = fat_get_entry_type,
	.set_entry_type = fat_set_entry_type,
	.get_entry_attr = fat_get_entry_attr,
	.set_entry_attr = fat_set_entry_attr,
	.get_entry_flag = fat_get_entry_flag,
	.set_entry_flag = fat_set_entry_flag,
	.get_entry_clu0 = fat_get_entry_clu0,
	.set_entry_clu0 = fat_set_entry_clu0,
	.get_entry_size = fat_get_entry_size,
	.set_entry_size = fat_set_entry_size,
	.get_entry_time = fat_get_entry_time,
	.set_entry_time = fat_set_entry_time,
};
+
/* Variant of fat_fs_func used when smart (AU-aware) allocation is
 * enabled: clusters are allocated through the AMAP and AU statistics
 * are exposed; all other operations are shared with fat_fs_func. */
static FS_FUNC_T amap_fat_fs_func = {
	.alloc_cluster = amap_fat_alloc_cluster,
	.free_cluster = fat_free_cluster,
	.count_used_clusters = fat_count_used_clusters,

	.init_dir_entry = fat_init_dir_entry,
	.init_ext_entry = fat_init_ext_entry,
	.find_dir_entry = fat_find_dir_entry,
	.delete_dir_entry = fat_delete_dir_entry,
	.get_uniname_from_ext_entry = fat_get_uniname_from_ext_entry,
	.count_ext_entries = fat_count_ext_entries,
	.calc_num_entries = fat_calc_num_entries,
	.check_max_dentries = fat_check_max_dentries,

	.get_entry_type = fat_get_entry_type,
	.set_entry_type = fat_set_entry_type,
	.get_entry_attr = fat_get_entry_attr,
	.set_entry_attr = fat_set_entry_attr,
	.get_entry_flag = fat_get_entry_flag,
	.set_entry_flag = fat_set_entry_flag,
	.get_entry_clu0 = fat_get_entry_clu0,
	.set_entry_clu0 = fat_set_entry_clu0,
	.get_entry_size = fat_get_entry_size,
	.set_entry_size = fat_set_entry_size,
	.get_entry_time = fat_get_entry_time,
	.set_entry_time = fat_set_entry_time,

	.get_au_stat = amap_get_au_stat,
};
+
+static s32 mount_fat_common(struct super_block *sb, FS_INFO_T *fsi,
+ bpb_t *p_bpb, u32 root_sects)
+{
+ bool fat32 = root_sects == 0 ? true : false;
+
+ fsi->sect_per_clus = p_bpb->sect_per_clus;
+ if (!is_power_of_2(fsi->sect_per_clus)) {
+ sdfat_msg(sb, KERN_ERR, "bogus sectors per cluster %u",
+ fsi->sect_per_clus);
+ return -EINVAL;
+ }
+
+ fsi->sect_per_clus_bits = ilog2(p_bpb->sect_per_clus);
+ fsi->cluster_size_bits = fsi->sect_per_clus_bits + sb->s_blocksize_bits;
+ fsi->cluster_size = 1 << fsi->cluster_size_bits;
+ fsi->dentries_per_clu = 1 <<
+ (fsi->cluster_size_bits - DENTRY_SIZE_BITS);
+
+ fsi->vol_flag = VOL_CLEAN;
+ fsi->clu_srch_ptr = CLUS_BASE;
+ fsi->used_clusters = (u32)~0;
+ fsi->fs_func = &fat_fs_func;
+
+ fsi->num_FAT_sectors = le16_to_cpu(p_bpb->num_fat_sectors);
+ if (fat32) {
+ u32 fat32_len = le32_to_cpu(p_bpb->f32.num_fat32_sectors);
+
+ if (fat32_len) {
+ fsi->num_FAT_sectors = fat32_len;
+ } else if (fsi->num_FAT_sectors) {
+ /* SPEC violation for compatibility */
+ sdfat_msg(sb, KERN_WARNING,
+ "no fatsz32, try with fatsz16: %u",
+ fsi->num_FAT_sectors);
+ }
+ }
+
+ if (!fsi->num_FAT_sectors) {
+ sdfat_msg(sb, KERN_ERR, "bogus fat size");
+ return -EINVAL;
+ }
+
+ if (!p_bpb->num_fats) {
+ sdfat_msg(sb, KERN_ERR, "bogus number of FAT structure");
+ return -EINVAL;
+ }
+
+ if (p_bpb->num_fats > 2) {
+ sdfat_msg(sb, KERN_WARNING,
+ "unsupported number of FAT structure :%u, try with 2",
+ p_bpb->num_fats);
+ }
+
+ fsi->FAT1_start_sector = le16_to_cpu(p_bpb->num_reserved);
+ if (p_bpb->num_fats == 1)
+ fsi->FAT2_start_sector = fsi->FAT1_start_sector;
+ else
+ fsi->FAT2_start_sector = fsi->FAT1_start_sector +
+ fsi->num_FAT_sectors;
+
+ fsi->root_start_sector = fsi->FAT2_start_sector + fsi->num_FAT_sectors;
+ fsi->data_start_sector = fsi->root_start_sector + root_sects;
+
+ /* SPEC violation for compatibility */
+ fsi->num_sectors = get_unaligned_le16(p_bpb->num_sectors);
+ if (!fsi->num_sectors)
+ fsi->num_sectors = le32_to_cpu(p_bpb->num_huge_sectors);
+
+ if (!fsi->num_sectors) {
+ sdfat_msg(sb, KERN_ERR, "bogus number of total sector count");
+ return -EINVAL;
+ }
+
+ /* because the cluster index starts with 2 */
+ fsi->num_clusters = (u32)((fsi->num_sectors - fsi->data_start_sector) >>
+ fsi->sect_per_clus_bits) + CLUS_BASE;
+
+ return 0;
+}
+
/*
 * Mount-time parsing for a FAT12/16 volume: validate the fixed root
 * directory, run the common BPB parsing, pick FAT12 vs FAT16 by cluster
 * count, and flag a dirty volume. Returns 0 or -EINVAL.
 */
s32 mount_fat16(struct super_block *sb, pbr_t *p_pbr)
{
	u32 num_root_sectors;
	bpb_t *p_bpb = &(p_pbr->bpb.fat);
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	fsi->vol_id = get_unaligned_le32(p_bpb->f16.vol_serial);
	fsi->root_dir = 0;	/* FAT12/16 root is a fixed area, not a chain */
	fsi->dentries_in_root = get_unaligned_le16(p_bpb->num_root_entries);
	if (!fsi->dentries_in_root) {
		sdfat_msg(sb, KERN_ERR, "bogus number of max dentry count "
			"of the root directory");
		return -EINVAL;
	}

	/* root directory size in sectors, rounded up */
	num_root_sectors = fsi->dentries_in_root << DENTRY_SIZE_BITS;
	num_root_sectors = ((num_root_sectors - 1) >> sb->s_blocksize_bits) + 1;

	if (mount_fat_common(sb, fsi, p_bpb, num_root_sectors))
		return -EINVAL;

	/* FAT12 vs FAT16 is decided purely by the cluster count */
	fsi->vol_type = FAT16;
	if (fsi->num_clusters < FAT12_THRESHOLD)
		fsi->vol_type = FAT12;
	fat_ent_ops_init(sb);

	if (p_bpb->f16.state & FAT_VOL_DIRTY) {
		fsi->vol_flag |= VOL_DIRTY;
		sdfat_log_msg(sb, KERN_WARNING, "Volume was not properly "
			"unmounted. Some data may be corrupt. "
			"Please run fsck.");
	}

	return 0;
} /* end of mount_fat16 */
+
/*
 * Compute the partition start offset ("hidden sectors") of the device
 * backing sb, converted to the filesystem's logical block size.
 * Returns 0 for a whole-disk device or when sb has no block device.
 */
static sector_t __calc_hidden_sect(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	sector_t hidden = 0;

	if (!bdev)
		goto out;

	hidden = bdev->bd_part->start_sect;	/* in 512B sectors */
	/* a disk device, not a partition */
	if (!hidden) {
		if (bdev != bdev->bd_contains)
			sdfat_log_msg(sb, KERN_WARNING,
				"hidden(0), but disk has a partition table");
		goto out;
	}

	/* convert 512B sectors to logical blocks */
	if (sb->s_blocksize_bits != 9) {
		ASSERT(sb->s_blocksize_bits > 9);
		hidden >>= (sb->s_blocksize_bits - 9);
	}

out:
	sdfat_log_msg(sb, KERN_INFO, "start_sect of part(%d) : %lld",
		bdev ? bdev->bd_part->partno : -1, (s64)hidden);
	return hidden;

}
+
/*
 * Mount-time parsing for a FAT32 volume: run the common BPB parsing,
 * then set up smart-allocation state (AU map) when the mount options
 * request it, validate mount-option dependencies, and flag a dirty
 * volume. Returns 0 or -EINVAL.
 */
s32 mount_fat32(struct super_block *sb, pbr_t *p_pbr)
{
	pbr32_t *p_bpb = (pbr32_t *)p_pbr;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	fsi->vol_id = get_unaligned_le32(p_bpb->bsx.vol_serial);
	fsi->root_dir = le32_to_cpu(p_bpb->bpb.f32.root_cluster);
	fsi->dentries_in_root = 0;	/* FAT32 root is an ordinary chain */

	if (mount_fat_common(sb, fsi, &p_bpb->bpb, 0))
		return -EINVAL;

	/* Should be initialized before calling amap_create() */
	fsi->vol_type = FAT32;
	fat_ent_ops_init(sb);

	/* Delayed / smart allocation related init */
	fsi->reserved_clusters = 0;

	/* AU Map Creation */
	if (SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART) {
		u32 hidden_sectors = le32_to_cpu(p_bpb->bpb.num_hid_sectors);
		/* NOTE(review): __calc_hidden_sect() returns sector_t; the
		 * assignment below truncates to u32 — confirm this is safe
		 * for very large devices */
		u32 calc_hid_sect = 0;
		int ret;

		/* calculate hidden sector size */
		calc_hid_sect = __calc_hidden_sect(sb);
		if (calc_hid_sect != hidden_sectors) {
			sdfat_log_msg(sb, KERN_WARNING, "abnormal hidden "
				"sector : bpb(%u) != ondisk(%u)",
				hidden_sectors, calc_hid_sect);
			if (SDFAT_SB(sb)->options.adj_hidsect) {
				sdfat_log_msg(sb, KERN_INFO,
					"adjustment hidden sector : "
					"bpb(%u) -> ondisk(%u)",
					hidden_sectors, calc_hid_sect);
				hidden_sectors = calc_hid_sect;
			}
		}

		SDFAT_SB(sb)->options.amap_opt.misaligned_sect = hidden_sectors;

		/* calculate AU size if it's not set */
		if (!SDFAT_SB(sb)->options.amap_opt.sect_per_au) {
			SDFAT_SB(sb)->options.amap_opt.sect_per_au =
				__calc_default_au_size(sb);
		}

		ret = amap_create(sb,
			SDFAT_SB(sb)->options.amap_opt.pack_ratio,
			SDFAT_SB(sb)->options.amap_opt.sect_per_au,
			SDFAT_SB(sb)->options.amap_opt.misaligned_sect);
		if (ret) {
			/* fall back to the standard allocator on failure */
			sdfat_log_msg(sb, KERN_WARNING, "failed to create AMAP."
				" disabling smart allocation. (err:%d)", ret);
			SDFAT_SB(sb)->options.improved_allocation &= ~(SDFAT_ALLOC_SMART);
		} else {
			fsi->fs_func = &amap_fat_fs_func;
		}
	}

	/* Check dependency of mount options */
	if (SDFAT_SB(sb)->options.improved_allocation !=
		(SDFAT_ALLOC_DELAY | SDFAT_ALLOC_SMART)) {
		sdfat_log_msg(sb, KERN_INFO, "disabling defragmentation because"
			" smart, delay options are disabled");
		SDFAT_SB(sb)->options.defrag = 0;
	}

	if (p_bpb->bsx.state & FAT_VOL_DIRTY) {
		fsi->vol_flag |= VOL_DIRTY;
		sdfat_log_msg(sb, KERN_WARNING, "Volume was not properly "
			"unmounted. Some data may be corrupt. "
			"Please run fsck.");
	}

	return 0;
} /* end of mount_fat32 */
+
+/* end of core_fat.c */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* @PROJECT : exFAT & FAT12/16/32 File System */
+/* @FILE : dfr.c */
+/* @PURPOSE : Defragmentation support for SDFAT32 */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/blkdev.h>
+
+#include "sdfat.h"
+#include "core.h"
+#include "amap_smart.h"
+
+#ifdef CONFIG_SDFAT_DFR
+/**
+ * @fn defrag_get_info
+ * @brief get HW params for defrag daemon
+ * @return 0 on success, -errno otherwise
+ * @param sb super block
+ * @param arg defrag info arguments
+ * @remark protected by super_block
+ */
+int
+defrag_get_info(
+ IN struct super_block *sb,
+ OUT struct defrag_info_arg *arg)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ FS_INFO_T *fsi = &(sbi->fsi);
+ AMAP_T *amap = sbi->fsi.amap;
+
+ if (!arg)
+ return -EINVAL;
+
+ /* Volume geometry */
+ arg->sec_sz = sb->s_blocksize;
+ arg->clus_sz = fsi->cluster_size;
+ arg->total_sec = fsi->num_sectors;
+
+ /* FAT geometry: a single FAT is reported when FAT2 aliases FAT1 */
+ arg->fat_offset_sec = fsi->FAT1_start_sector;
+ arg->fat_sz_sec = fsi->num_FAT_sectors;
+ arg->n_fat = (fsi->FAT1_start_sector == fsi->FAT2_start_sector) ? 1 : 2;
+
+ /* AU geometry for the daemon's alignment computations */
+ arg->sec_per_au = amap->option.au_size;
+ arg->hidden_sectors = amap->option.au_align_factor % amap->option.au_size;
+
+ return 0;
+}
+
+
+/**
+ * @fn __defrag_scan_dir
+ * @brief classify one DOS directory entry for the traverse ioctl
+ * @return 0 if the entry was reported into @arg, -ENOENT at end-of-directory,
+ *         -EINVAL on bad params, -EPERM for entries that are skipped
+ *         (dot/dot-dot and unknown entry types)
+ * @param sb super block
+ * @param dos_ep short-name directory entry to inspect
+ * @param i_pos packed position (dir-cluster hi / entry-index lo) of the entry
+ * @param arg output slot filled with type, start cluster, i_pos and name
+ */
+static int
+__defrag_scan_dir(
+ IN struct super_block *sb,
+ IN DOS_DENTRY_T *dos_ep,
+ IN loff_t i_pos,
+ OUT struct defrag_trav_arg *arg)
+{
+ FS_INFO_T *fsi = NULL;
+ UNI_NAME_T uniname;
+ unsigned int type = 0, start_clus = 0;
+ int err = -EPERM;
+
+ /* Check params */
+ ERR_HANDLE2((!sb || !dos_ep || !i_pos || !arg), err, -EINVAL);
+ fsi = &(SDFAT_SB(sb)->fsi);
+
+ /* Get given entry's type */
+ type = fsi->fs_func->get_entry_type((DENTRY_T *) dos_ep);
+
+ /* Check dos_ep: "." and ".." are skipped (err stays -EPERM) */
+ if (!strncmp(dos_ep->name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) {
+ ;
+ } else if (!strncmp(dos_ep->name, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH)) {
+ ;
+ } else if ((type == TYPE_DIR) || (type == TYPE_FILE)) {
+
+ /* Set start_clus from the split hi/lo on-disk fields */
+ SET32_HI(start_clus, le16_to_cpu(dos_ep->start_clu_hi));
+ SET32_LO(start_clus, le16_to_cpu(dos_ep->start_clu_lo));
+ arg->start_clus = start_clus;
+
+ /* Set type & i_pos */
+ if (type == TYPE_DIR)
+ arg->type = DFR_TRAV_TYPE_DIR;
+ else
+ arg->type = DFR_TRAV_TYPE_FILE;
+
+ arg->i_pos = i_pos;
+
+ /* Set name */
+ memset(&uniname, 0, sizeof(UNI_NAME_T));
+ get_uniname_from_dos_entry(sb, dos_ep, &uniname, 0x1);
+ /* FIXME :
+ * we should think that whether the size of arg->name
+ * is enough or not
+ */
+ nls_uni16s_to_vfsname(sb, &uniname,
+ arg->name, sizeof(arg->name));
+
+ err = 0;
+ /* End case: an unused entry means no more entries follow */
+ } else if (type == TYPE_UNUSED) {
+ err = -ENOENT;
+ } else {
+ ;
+ }
+
+error:
+ return err;
+}
+
+
+/**
+ * @fn defrag_scan_dir
+ * @brief scan given directory
+ * @return 0 on success, -errno otherwise
+ * @param sb super block
+ * @param args traverse args
+ * @remark protected by inode_lock, super_block and volume lock
+ */
+int
+defrag_scan_dir(
+ IN struct super_block *sb,
+ INOUT struct defrag_trav_arg *args)
+{
+ struct sdfat_sb_info *sbi = NULL;
+ FS_INFO_T *fsi = NULL;
+ struct defrag_trav_header *header = NULL;
+ DOS_DENTRY_T *dos_ep;
+ CHAIN_T chain;
+ int dot_found = 0, args_idx = DFR_TRAV_HEADER_IDX + 1, clus = 0, index = 0;
+ int err = 0, j = 0;
+
+ /* Check params */
+ ERR_HANDLE2((!sb || !args), err, -EINVAL);
+ sbi = SDFAT_SB(sb);
+ fsi = &(sbi->fsi);
+ /* args[0] doubles as the traverse header carrying scan state in/out */
+ header = (struct defrag_trav_header *) args;
+
+ /* Exceptional case for ROOT: it has no "." entry, skip that check */
+ if (header->i_pos == DFR_TRAV_ROOT_IPOS) {
+ header->start_clus = fsi->root_dir;
+ dfr_debug("IOC_DFR_TRAV for ROOT: start_clus %08x", header->start_clus);
+ dot_found = 1;
+ }
+
+ chain.dir = header->start_clus;
+ chain.size = 0;
+ chain.flags = 0;
+
+ /* Check if this is directory: entry 0 must be "." */
+ if (!dot_found) {
+ FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
+ ERR_HANDLE(err);
+ dos_ep = (DOS_DENTRY_T *) get_dentry_in_dir(sb, &chain, 0, NULL);
+ ERR_HANDLE2(!dos_ep, err, -EIO);
+
+ if (strncmp(dos_ep->name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) {
+ err = -EINVAL;
+ dfr_err("Scan: Not a directory, err %d", err);
+ goto error;
+ }
+ }
+
+ /* For more-scan case: resume from the hint saved on the previous call,
+ * but only if the hint still refers to this same directory
+ */
+ if ((header->stat == DFR_TRAV_STAT_MORE) &&
+ (header->start_clus == sbi->dfr_hint_clus) &&
+ (sbi->dfr_hint_idx > 0)) {
+
+ index = sbi->dfr_hint_idx;
+ for (j = 0; j < (sbi->dfr_hint_idx / fsi->dentries_per_clu); j++) {
+ /* Follow FAT-chain */
+ FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
+ ERR_HANDLE(err);
+ err = fat_ent_get(sb, chain.dir, &(chain.dir));
+ ERR_HANDLE(err);
+
+ if (!IS_CLUS_EOF(chain.dir)) {
+ clus++;
+ index -= fsi->dentries_per_clu;
+ } else {
+ /**
+ * This directory modified. Stop scanning.
+ */
+ err = -EINVAL;
+ dfr_err("Scan: SCAN_MORE failed, err %d", err);
+ goto error;
+ }
+ }
+
+ /* For first-scan case */
+ } else {
+ clus = 0;
+ index = 0;
+ }
+
+scan_fat_chain:
+ /* Scan given directory and get info of children.
+ * 'clus' counts clusters walked so far; 'index' is the entry slot
+ * within the current cluster.
+ */
+ for ( ; index < fsi->dentries_per_clu; index++) {
+ DOS_DENTRY_T *dos_ep = NULL;
+ loff_t i_pos = 0;
+
+ /* Get dos_ep */
+ FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
+ ERR_HANDLE(err);
+ dos_ep = (DOS_DENTRY_T *) get_dentry_in_dir(sb, &chain, index, NULL);
+ ERR_HANDLE2(!dos_ep, err, -EIO);
+
+ /* Make i_pos for this entry: dir start cluster hi, linear index lo */
+ SET64_HI(i_pos, header->start_clus);
+ SET64_LO(i_pos, clus * fsi->dentries_per_clu + index);
+
+ err = __defrag_scan_dir(sb, dos_ep, i_pos, &args[args_idx]);
+ if (!err) {
+ /* More-scan case: output page full, save resume hint */
+ if (++args_idx >= (PAGE_SIZE / sizeof(struct defrag_trav_arg))) {
+ sbi->dfr_hint_clus = header->start_clus;
+ sbi->dfr_hint_idx = clus * fsi->dentries_per_clu + index + 1;
+
+ header->stat = DFR_TRAV_STAT_MORE;
+ header->nr_entries = args_idx;
+ goto error;
+ }
+ /* Error case */
+ } else if (err == -EINVAL) {
+ sbi->dfr_hint_clus = sbi->dfr_hint_idx = 0;
+ dfr_err("Scan: err %d", err);
+ goto error;
+ /* End case */
+ } else if (err == -ENOENT) {
+ sbi->dfr_hint_clus = sbi->dfr_hint_idx = 0;
+ err = 0;
+ goto done;
+ } else {
+ /* DO NOTHING: skipped entry (dot dirs, unknown types) */
+ }
+ err = 0;
+ }
+
+ /* Follow FAT-chain */
+ FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
+ ERR_HANDLE(err);
+ err = fat_ent_get(sb, chain.dir, &(chain.dir));
+ ERR_HANDLE(err);
+
+ if (!IS_CLUS_EOF(chain.dir)) {
+ index = 0;
+ clus++;
+ goto scan_fat_chain;
+ }
+
+done:
+ /* Update header */
+ header->stat = DFR_TRAV_STAT_DONE;
+ header->nr_entries = args_idx;
+
+error:
+ return err;
+}
+
+
+/**
+ * @fn __defrag_validate_cluster_prev
+ * @brief verify the chunk's predecessor still points at the chunk
+ * @return 0 on success, -EPERM if the dir entry cannot be read,
+ *         -ENXIO if the on-disk link no longer matches chunk->d_clus
+ * @param sb super block
+ * @param chunk chunk whose prev link is validated
+ * @remark prev_clus == 0 marks the first cluster of a file, so the
+ *         start cluster is read from the directory entry instead of FAT
+ */
+static int
+__defrag_validate_cluster_prev(
+ IN struct super_block *sb,
+ IN struct defrag_chunk_info *chunk)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ CHAIN_T dir;
+ DENTRY_T *ep = NULL;
+ unsigned int entry = 0, clus = 0;
+ int err = 0;
+
+ if (chunk->prev_clus == 0) {
+ /* For the first cluster of a file */
+ dir.dir = GET64_HI(chunk->i_pos);
+ dir.flags = 0x1; // Assume non-continuous
+
+ entry = GET64_LO(chunk->i_pos);
+
+ FAT32_CHECK_CLUSTER(fsi, dir.dir, err);
+ ERR_HANDLE(err);
+ ep = get_dentry_in_dir(sb, &dir, entry, NULL);
+ if (!ep) {
+ err = -EPERM;
+ goto error;
+ }
+
+ /* should call fat_get_entry_clu0(ep) */
+ clus = fsi->fs_func->get_entry_clu0(ep);
+ if (clus != chunk->d_clus) {
+ err = -ENXIO;
+ goto error;
+ }
+ } else {
+ /* Normal case: FAT[prev] must still point at the chunk start */
+ FAT32_CHECK_CLUSTER(fsi, chunk->prev_clus, err);
+ ERR_HANDLE(err);
+ err = fat_ent_get(sb, chunk->prev_clus, &clus);
+ if (err)
+ goto error;
+ if (chunk->d_clus != clus)
+ err = -ENXIO;
+ }
+
+error:
+ return err;
+}
+
+
+/* Verify that the FAT entry of the chunk's last cluster still matches the
+ * chunk's recorded next_clus. Returns 0 on success, -ENXIO on mismatch.
+ */
+static int
+__defrag_validate_cluster_next(
+ IN struct super_block *sb,
+ IN struct defrag_chunk_info *chunk)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ unsigned int last_clus = chunk->d_clus + chunk->nr_clus - 1;
+ unsigned int read_clus = 0;
+ int err = 0;
+
+ /* Check next_clus against the on-disk FAT link of the last cluster */
+ FAT32_CHECK_CLUSTER(fsi, last_clus, err);
+ ERR_HANDLE(err);
+ err = fat_ent_get(sb, last_clus, &read_clus);
+ if (err)
+ goto error;
+ if (chunk->next_clus != (read_clus & FAT32_EOF))
+ err = -ENXIO;
+
+error:
+ return err;
+}
+
+
+/**
+ * @fn __defrag_check_au
+ * @brief check if this AU is in use
+ * @return 0 if idle, 1 if busy
+ * @param sb super block
+ * @param clus physical cluster num
+ * @param limit # of used clusters from daemon
+ */
+static int
+__defrag_check_au(
+ struct super_block *sb,
+ u32 clus,
+ u32 limit)
+{
+ /* The AU is considered busy when its free-cluster count dropped
+ * below the limit computed by the caller.
+ */
+ unsigned int free_clus = amap_get_freeclus(sb, clus);
+ int busy = (free_clus < limit) ? 1 : 0;
+
+#if defined(CONFIG_SDFAT_DFR_DEBUG) && defined(CONFIG_SDFAT_DBG_MSG)
+ if (busy) {
+ AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+ AU_INFO_T *au = GET_AU(amap, i_AU_of_CLU(amap, clus));
+
+ dfr_debug("AU[%d] nr_free %d, limit %d", au->idx, free_clus, limit);
+ }
+#endif
+ return busy;
+}
+
+
+/**
+ * @fn defrag_validate_cluster
+ * @brief validate cluster info of given chunk
+ * @return 0 on success, -errno otherwise
+ * @param inode inode of given chunk
+ * @param chunk given chunk
+ * @param skip_prev flag to skip checking previous cluster info
+ * @remark protected by super_block and volume lock
+ */
+int
+defrag_validate_cluster(
+ IN struct inode *inode,
+ IN struct defrag_chunk_info *chunk,
+ IN int skip_prev)
+{
+ struct super_block *sb = inode->i_sb;
+ FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
+ unsigned int clus = 0;
+ int err = 0, i = 0;
+
+ /* If this inode is unlink-ed, skip it */
+ if (fid->dir.dir == DIR_DELETED)
+ return -ENOENT;
+
+ /* Skip working-AU: the allocator is currently filling it */
+ err = amap_check_working(sb, chunk->d_clus);
+ if (err)
+ return -EBUSY;
+
+ /* Check # of free_clus of belonged AU.
+ * NOTE(review): chunk->au_clus appears to be the used-cluster count the
+ * daemon saw in this AU — confirm against the daemon-side encoder.
+ */
+ err = __defrag_check_au(inode->i_sb, chunk->d_clus, CLUS_PER_AU(sb) - chunk->au_clus);
+ if (err)
+ return -EINVAL;
+
+ /* Check chunk's clusters: logical offsets must still map to the same
+ * physical run the daemon recorded
+ */
+ for (i = 0; i < chunk->nr_clus; i++) {
+ err = fsapi_map_clus(inode, chunk->f_clus + i, &clus, ALLOC_NOWHERE);
+ if (err || (chunk->d_clus + i != clus)) {
+ if (!err)
+ err = -ENXIO;
+ goto error;
+ }
+ }
+
+ /* Check next_clus */
+ err = __defrag_validate_cluster_next(sb, chunk);
+ ERR_HANDLE(err);
+
+ if (!skip_prev) {
+ /* Check prev_clus */
+ err = __defrag_validate_cluster_prev(sb, chunk);
+ ERR_HANDLE(err);
+ }
+
+error:
+ return err;
+}
+
+
+/**
+ * @fn defrag_reserve_clusters
+ * @brief reserve clusters for defrag
+ * @return 0 on success, -errno otherwise
+ * @param sb super block
+ * @param nr_clus # of clusters to reserve
+ * @remark protected by super_block and volume lock
+ */
+int
+defrag_reserve_clusters(
+ INOUT struct super_block *sb,
+ IN int nr_clus)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ FS_INFO_T *fsi = &(sbi->fsi);
+
+ /* Without delayed allocation there is no reservation accounting */
+ if (!(sbi->options.improved_allocation & SDFAT_ALLOC_DELAY))
+ return 0;
+
+ /* Reserving must not exhaust the volume (2 clusters kept back) */
+ if (fsi->used_clusters + fsi->reserved_clusters + nr_clus >= fsi->num_clusters - 2)
+ return -ENOSPC;
+
+ /* Releasing more than was reserved is a fatal accounting bug */
+ if (fsi->reserved_clusters + nr_clus < 0) {
+ dfr_err("Reserve count: reserved_clusters %d, nr_clus %d",
+ fsi->reserved_clusters, nr_clus);
+ BUG_ON(fsi->reserved_clusters + nr_clus < 0);
+ }
+
+ /* nr_clus may be negative to release a previous reservation */
+ sbi->dfr_reserved_clus += nr_clus;
+ fsi->reserved_clusters += nr_clus;
+
+ return 0;
+}
+
+
+/**
+ * @fn defrag_mark_ignore
+ * @brief mark corresponding AU to be ignored
+ * @return 0 on success, -errno otherwise
+ * @param sb super block
+ * @param clus given cluster num
+ * @remark protected by super_block
+ */
+int
+defrag_mark_ignore(
+ INOUT struct super_block *sb,
+ IN unsigned int clus)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ int err = 0;
+
+ /* AU ignore marks only exist when smart allocation is active */
+ if (sbi->options.improved_allocation & SDFAT_ALLOC_SMART)
+ err = amap_mark_ignore(sb, clus);
+
+ if (err)
+ dfr_debug("err %d", err);
+
+ return err;
+}
+
+
+/**
+ * @fn defrag_unmark_ignore_all
+ * @brief unmark all ignored AUs
+ * @return void
+ * @param sb super block
+ * @remark protected by super_block
+ */
+void
+defrag_unmark_ignore_all(struct super_block *sb)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ /* AU ignore marks only exist when smart allocation is active */
+ if (sbi->options.improved_allocation & SDFAT_ALLOC_SMART)
+ amap_unmark_ignore_all(sb);
+}
+
+
+/**
+ * @fn defrag_map_cluster
+ * @brief get_block function for defrag dests
+ * @return 0 on success, -errno otherwise
+ * @param inode inode
+ * @param clu_offset logical cluster offset
+ * @param clu mapped cluster (physical)
+ * @remark protected by super_block and volume lock
+ */
+int
+defrag_map_cluster(
+ struct inode *inode,
+ unsigned int clu_offset,
+ unsigned int *clu)
+{
+ struct super_block *sb = inode->i_sb;
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+#ifdef CONFIG_SDFAT_DFR_PACKING
+ AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+#endif
+ FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
+ struct defrag_info *ino_dfr = &(SDFAT_I(inode)->dfr_info);
+ struct defrag_chunk_info *chunk = NULL;
+ CHAIN_T new_clu;
+ int i = 0, nr_new = 0, err = 0;
+
+ /* Get corresponding chunk for the requested logical offset */
+ for (i = 0; i < ino_dfr->nr_chunks; i++) {
+ chunk = &(ino_dfr->chunks[i]);
+
+ if ((chunk->f_clus <= clu_offset) && (clu_offset < chunk->f_clus + chunk->nr_clus)) {
+ /* For already allocated new_clus */
+ if (sbi->dfr_new_clus[chunk->new_idx + clu_offset - chunk->f_clus]) {
+ *clu = sbi->dfr_new_clus[chunk->new_idx + clu_offset - chunk->f_clus];
+ return 0;
+ }
+ break;
+ }
+ }
+ BUG_ON(!chunk);
+
+ /* Metadata will change: mark the volume dirty first */
+ fscore_set_vol_flags(sb, VOL_DIRTY, 0);
+
+ new_clu.dir = CLUS_EOF;
+ new_clu.size = 0;
+ new_clu.flags = fid->flags;
+
+ /* Allocate new cluster; with PACKING, fall back to packing allocation
+ * when clean AUs become scarce
+ */
+#ifdef CONFIG_SDFAT_DFR_PACKING
+ if (amap->n_clean_au * DFR_FULL_RATIO <= amap->n_au * DFR_DEFAULT_PACKING_RATIO)
+ err = fsi->fs_func->alloc_cluster(sb, 1, &new_clu, ALLOC_COLD_PACKING);
+ else
+ err = fsi->fs_func->alloc_cluster(sb, 1, &new_clu, ALLOC_COLD_ALIGNED);
+#else
+ err = fsi->fs_func->alloc_cluster(sb, 1, &new_clu, ALLOC_COLD_ALIGNED);
+#endif
+
+ if (err) {
+ /* BUGFIX: log the real error code (was a hard-coded 0) */
+ dfr_err("Map: 1 %d", err);
+ return err;
+ }
+
+ /* Decrease reserved cluster count */
+ defrag_reserve_clusters(sb, -1);
+
+ /* Add new_clus info in ino_dfr */
+ sbi->dfr_new_clus[chunk->new_idx + clu_offset - chunk->f_clus] = new_clu.dir;
+
+ /* Count the leading run of already-allocated destination clusters */
+ for (i = 0; i < chunk->nr_clus; i++) {
+ if (!sbi->dfr_new_clus[chunk->new_idx + i])
+ break;
+ nr_new++;
+ }
+ /* Make FAT-chain for new_clus once the whole chunk is allocated */
+ if (nr_new == chunk->nr_clus) {
+ for (i = 0; i < chunk->nr_clus - 1; i++) {
+ FAT32_CHECK_CLUSTER(fsi, sbi->dfr_new_clus[chunk->new_idx + i], err);
+ BUG_ON(err);
+ if (fat_ent_set(sb,
+ sbi->dfr_new_clus[chunk->new_idx + i],
+ sbi->dfr_new_clus[chunk->new_idx + i + 1]))
+ return -EIO;
+ }
+ }
+
+ *clu = new_clu.dir;
+ return 0;
+}
+
+
+/**
+ * @fn defrag_writepage_end_io
+ * @brief check WB status of requested page
+ * @return void
+ * @param page page
+ */
+void
+defrag_writepage_end_io(
+ INOUT struct page *page)
+{
+ struct super_block *sb = page->mapping->host->i_sb;
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ struct defrag_info *ino_dfr = &(SDFAT_I(page->mapping->host)->dfr_info);
+ unsigned int clus_start = 0, clus_end = 0;
+ int i = 0;
+
+ /* Check if this inode is on defrag */
+ if (atomic_read(&ino_dfr->stat) != DFR_INO_STAT_REQ)
+ return;
+
+ /* Cluster range covered by this page */
+ clus_start = page->index / PAGES_PER_CLUS(sb);
+ clus_end = clus_start + 1;
+
+ /* Check each chunk in given inode */
+ for (i = 0; i < ino_dfr->nr_chunks; i++) {
+ struct defrag_chunk_info *chunk = &(ino_dfr->chunks[i]);
+ unsigned int chunk_start = 0, chunk_end = 0;
+
+ chunk_start = chunk->f_clus;
+ chunk_end = chunk->f_clus + chunk->nr_clus;
+
+ if ((clus_start >= chunk_start) && (clus_end <= chunk_end)) {
+ int off = clus_start - chunk_start;
+
+ /* Clear this page's bit in the per-cluster WB bitmap;
+ * __defrag_check_wb() polls these bits for completion
+ */
+ clear_bit((page->index & (PAGES_PER_CLUS(sb) - 1)),
+ (volatile unsigned long *)&(sbi->dfr_page_wb[chunk->new_idx + off]));
+ }
+ }
+}
+
+
+/**
+ * @fn __defrag_check_wb
+ * @brief check if WB for given chunk completed
+ * @return 0 on success, -errno otherwise
+ * @param sbi super block info
+ * @param chunk given chunk
+ */
+static int
+__defrag_check_wb(
+ IN struct sdfat_sb_info *sbi,
+ IN struct defrag_chunk_info *chunk)
+{
+ int err = 0, wb_i = 0, i = 0, nr_new = 0;
+
+ if (!sbi || !chunk)
+ return -EINVAL;
+
+ /* Check WB complete status first: any set bit means pages of that
+ * cluster are still under writeback
+ */
+ for (wb_i = 0; wb_i < chunk->nr_clus; wb_i++) {
+ if (atomic_read((atomic_t *)&(sbi->dfr_page_wb[chunk->new_idx + wb_i]))) {
+ err = -EBUSY;
+ break;
+ }
+ }
+
+ /**
+ * Check NEW_CLUS status.
+ * writepage_end_io cannot check whole WB complete status,
+ * so we need to check NEW_CLUS status.
+ */
+ for (i = 0; i < chunk->nr_clus; i++)
+ if (sbi->dfr_new_clus[chunk->new_idx + i])
+ nr_new++;
+
+ if (nr_new == chunk->nr_clus) {
+ err = 0;
+ if ((wb_i != chunk->nr_clus) && (wb_i != chunk->nr_clus - 1))
+ dfr_debug("submit_fullpage_bio() called on a page (nr_clus %d, wb_i %d)",
+ chunk->nr_clus, wb_i);
+
+ BUG_ON(nr_new > chunk->nr_clus);
+ } else {
+ dfr_debug("nr_new %d, nr_clus %d", nr_new, chunk->nr_clus);
+ err = -EBUSY;
+ }
+
+ /* Update chunk's state */
+ if (!err)
+ chunk->stat |= DFR_CHUNK_STAT_WB;
+
+ return err;
+}
+
+
+/**
+ * @fn __defrag_check_fat_old
+ * @brief sanity-check the original FAT-chain after a chunk rollback
+ * @return void (BUGs on a broken chain)
+ * @param sb super block
+ * @param inode inode owning the chain
+ * @param chunk chunk that was rolled back (used for diagnostics only)
+ * @remark walks the chain from fid.start_clu and BUGs if it ends early
+ */
+static void
+__defrag_check_fat_old(
+ IN struct super_block *sb,
+ IN struct inode *inode,
+ IN struct defrag_chunk_info *chunk)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ unsigned int clus = 0;
+ int err = 0, idx = 0, max_idx = 0;
+
+ /* Get start_clus */
+ clus = SDFAT_I(inode)->fid.start_clu;
+
+ /* Follow FAT-chain; expected length derives from the on-disk size */
+ #define num_clusters(val) ((val) ? (s32)((val - 1) >> fsi->cluster_size_bits) + 1 : 0)
+ max_idx = num_clusters(SDFAT_I(inode)->i_size_ondisk);
+ for (idx = 0; idx < max_idx; idx++) {
+
+ FAT32_CHECK_CLUSTER(fsi, clus, err);
+ ERR_HANDLE(err);
+ err = fat_ent_get(sb, clus, &clus);
+ ERR_HANDLE(err);
+
+ /* EOF or FREE before the last expected cluster = corrupt chain */
+ if ((idx < max_idx - 1) && (IS_CLUS_EOF(clus) || IS_CLUS_FREE(clus))) {
+ dfr_err("FAT: inode %p, max_idx %d, idx %d, clus %08x, "
+ "f_clus %d, nr_clus %d", inode, max_idx,
+ idx, clus, chunk->f_clus, chunk->nr_clus);
+ BUG_ON(idx < max_idx - 1);
+ goto error;
+ }
+ }
+
+error:
+ return;
+}
+
+
+/**
+ * @fn __defrag_check_fat_new
+ * @brief sanity-check the newly built FAT-chain of a passed chunk
+ * @return void (BUGs on any mismatch)
+ * @param sb super block
+ * @param inode inode owning the chain
+ * @param chunk chunk whose destination chain is verified
+ * @remark verifies the link into the chunk, every internal link, and the
+ *         link out of the chunk against the on-disk FAT
+ */
+static void
+__defrag_check_fat_new(
+ IN struct super_block *sb,
+ IN struct inode *inode,
+ IN struct defrag_chunk_info *chunk)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ unsigned int clus = 0;
+ int i = 0, err = 0;
+
+ /* Check start of FAT-chain: the predecessor (or the dir entry's
+ * start cluster for a file head) must point at the first new cluster
+ */
+ if (chunk->prev_clus) {
+ FAT32_CHECK_CLUSTER(fsi, chunk->prev_clus, err);
+ BUG_ON(err);
+ err = fat_ent_get(sb, chunk->prev_clus, &clus);
+ BUG_ON(err);
+ } else {
+ clus = SDFAT_I(inode)->fid.start_clu;
+ }
+ if (sbi->dfr_new_clus[chunk->new_idx] != clus) {
+ dfr_err("FAT: inode %p, start_clus %08x, read_clus %08x",
+ inode, sbi->dfr_new_clus[chunk->new_idx], clus);
+ err = -EIO; /* BUGFIX: negative errno (was positive EIO) */
+ goto error;
+ }
+
+ /* Check inside of FAT-chain */
+ if (chunk->nr_clus > 1) {
+ for (i = 0; i < chunk->nr_clus - 1; i++) {
+ FAT32_CHECK_CLUSTER(fsi, sbi->dfr_new_clus[chunk->new_idx + i], err);
+ BUG_ON(err);
+ err = fat_ent_get(sb, sbi->dfr_new_clus[chunk->new_idx + i], &clus);
+ BUG_ON(err);
+ if (sbi->dfr_new_clus[chunk->new_idx + i + 1] != clus) {
+ /* BUGFIX: report the expected link that mismatched,
+ * not the chunk's first cluster
+ */
+ dfr_err("FAT: inode %p, new_clus %08x, read_clus %08x",
+ inode, sbi->dfr_new_clus[chunk->new_idx + i + 1], clus);
+ err = -EIO;
+ goto error;
+ }
+ }
+ clus = 0;
+ }
+
+ /* Check end of FAT-chain: must link to the chunk's recorded successor */
+ FAT32_CHECK_CLUSTER(fsi, sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], err);
+ BUG_ON(err);
+ err = fat_ent_get(sb, sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], &clus);
+ BUG_ON(err);
+ if ((chunk->next_clus & 0x0FFFFFFF) != (clus & 0x0FFFFFFF)) {
+ dfr_err("FAT: inode %p, next_clus %08x, read_clus %08x", inode, chunk->next_clus, clus);
+ err = -EIO;
+ }
+
+error:
+ BUG_ON(err);
+}
+
+
+/**
+ * @fn __defrag_update_dirent
+ * @brief update DIR entry for defrag req
+ * @return void
+ * @param sb super block
+ * @param chunk given chunk
+ */
+static void
+__defrag_update_dirent(
+ struct super_block *sb,
+ struct defrag_chunk_info *chunk)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
+ CHAIN_T dir;
+ DOS_DENTRY_T *dos_ep;
+ unsigned int entry = 0;
+ unsigned long long sector = 0;
+ unsigned short hi = 0, lo = 0;
+ int err = 0;
+
+ /* i_pos packs the parent dir's start cluster (hi) and entry index (lo) */
+ dir.dir = GET64_HI(chunk->i_pos);
+ dir.flags = 0x1; // Assume non-continuous
+
+ entry = GET64_LO(chunk->i_pos);
+
+ FAT32_CHECK_CLUSTER(fsi, dir.dir, err);
+ BUG_ON(err);
+ /* NOTE(review): dos_ep is dereferenced without a NULL check below —
+ * other callers of get_dentry_in_dir() check the return value; confirm
+ * a lookup failure is impossible on this path.
+ */
+ dos_ep = (DOS_DENTRY_T *) get_dentry_in_dir(sb, &dir, entry, &sector);
+
+ /* Point the dir entry's start cluster at the chunk's new location */
+ hi = GET32_HI(sbi->dfr_new_clus[chunk->new_idx]);
+ lo = GET32_LO(sbi->dfr_new_clus[chunk->new_idx]);
+
+ dos_ep->start_clu_hi = cpu_to_le16(hi);
+ dos_ep->start_clu_lo = cpu_to_le16(lo);
+
+ /* Mark the cached sector dirty so the change reaches disk */
+ dcache_modify(sb, sector);
+}
+
+
+/**
+ * @fn defrag_update_fat_prev
+ * @brief update FAT chain for defrag requests
+ * @return void
+ * @param sb super block
+ * @param force flag to force FAT update
+ * @remark protected by super_block and volume lock
+ */
+void
+defrag_update_fat_prev(
+ struct super_block *sb,
+ int force)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ FS_INFO_T *fsi = &(sbi->fsi);
+ struct defrag_info *sb_dfr = &sbi->dfr_info, *ino_dfr = NULL;
+ int skip = 0, done = 0;
+
+ /* Check if FS_ERROR occurred */
+ if (sb->s_flags & MS_RDONLY) {
+ dfr_err("RDONLY partition (err %d)", -EPERM);
+ goto out;
+ }
+
+ list_for_each_entry(ino_dfr, &sb_dfr->entry, entry) {
+ struct inode *inode = &(container_of(ino_dfr, struct sdfat_inode_info, dfr_info)->vfs_inode);
+ struct sdfat_inode_info *ino_info = SDFAT_I(inode);
+ struct defrag_chunk_info *chunk_prev = NULL;
+ int i = 0, j = 0;
+
+ mutex_lock(&ino_dfr->lock);
+ BUG_ON(atomic_read(&ino_dfr->stat) != DFR_INO_STAT_REQ);
+ for (i = 0; i < ino_dfr->nr_chunks; i++) {
+ struct defrag_chunk_info *chunk = NULL;
+ int err = 0;
+
+ chunk = &(ino_dfr->chunks[i]);
+ BUG_ON(!chunk);
+
+ /* Do nothing for already passed chunk */
+ if (chunk->stat == DFR_CHUNK_STAT_PASS) {
+ done++;
+ continue;
+ }
+
+ /* Handle error case */
+ if (chunk->stat == DFR_CHUNK_STAT_ERR) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ /* Double-check clusters.
+ * If the previous chunk is logically contiguous and already
+ * passed, its prev-link was just rewritten, so skip the
+ * prev validation and link to the previous chunk's new tail.
+ */
+ if (chunk_prev &&
+ (chunk->f_clus == chunk_prev->f_clus + chunk_prev->nr_clus) &&
+ (chunk_prev->stat == DFR_CHUNK_STAT_PASS)) {
+
+ err = defrag_validate_cluster(inode, chunk, 1);
+
+ /* Handle continuous chunks in a file */
+ if (!err) {
+ chunk->prev_clus =
+ sbi->dfr_new_clus[chunk_prev->new_idx + chunk_prev->nr_clus - 1];
+ dfr_debug("prev->f_clus %d, prev->nr_clus %d, chunk->f_clus %d",
+ chunk_prev->f_clus, chunk_prev->nr_clus, chunk->f_clus);
+ }
+ } else {
+ err = defrag_validate_cluster(inode, chunk, 0);
+ }
+
+ if (err) {
+ dfr_err("Cluster validation: inode %p, chunk->f_clus %d, err %d",
+ inode, chunk->f_clus, err);
+ goto error;
+ }
+
+ /**
+ * Skip update_fat_prev if WB or update_fat_next not completed.
+ * Go to error case if FORCE set.
+ */
+ if (__defrag_check_wb(sbi, chunk) || (chunk->stat != DFR_CHUNK_STAT_PREP)) {
+ if (force) {
+ err = -EPERM;
+ dfr_err("Skip case: inode %p, stat %x, f_clus %d, err %d",
+ inode, chunk->stat, chunk->f_clus, err);
+ goto error;
+ }
+ skip++;
+ continue;
+ }
+
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+ /* SPO test */
+ defrag_spo_test(sb, DFR_SPO_RANDOM, __func__);
+#endif
+
+ /* Update chunk's previous cluster */
+ if (chunk->prev_clus == 0) {
+ /* For the first cluster of a file */
+ /* Update ino_info->fid.start_clu */
+ ino_info->fid.start_clu = sbi->dfr_new_clus[chunk->new_idx];
+ __defrag_update_dirent(sb, chunk);
+ } else {
+ FAT32_CHECK_CLUSTER(fsi, chunk->prev_clus, err);
+ BUG_ON(err);
+ if (fat_ent_set(sb,
+ chunk->prev_clus,
+ sbi->dfr_new_clus[chunk->new_idx])) {
+ err = -EIO;
+ goto error;
+ }
+ }
+
+ /* Clear extent cache: old mappings are now stale */
+ extent_cache_inval_inode(inode);
+
+ /* Update FID info */
+ ino_info->fid.hint_bmap.off = CLUS_EOF;
+ ino_info->fid.hint_bmap.clu = 0;
+
+ /* Clear old FAT-chain */
+ for (j = 0; j < chunk->nr_clus; j++)
+ defrag_free_cluster(sb, chunk->d_clus + j);
+
+ /* Mark this chunk PASS */
+ chunk->stat = DFR_CHUNK_STAT_PASS;
+ __defrag_check_fat_new(sb, inode, chunk);
+
+ done++;
+
+/* Per-chunk error handling: falls through with err == 0 on success,
+ * otherwise rolls back only this chunk and marks it ERR.
+ */
+error:
+ if (err) {
+ /**
+ * chunk->new_idx != 0 means this chunk needs to be cleaned up
+ */
+ if (chunk->new_idx) {
+ /* Free already allocated clusters */
+ for (j = 0; j < chunk->nr_clus; j++) {
+ if (sbi->dfr_new_clus[chunk->new_idx + j]) {
+ defrag_free_cluster(sb, sbi->dfr_new_clus[chunk->new_idx + j]);
+ sbi->dfr_new_clus[chunk->new_idx + j] = 0;
+ }
+ }
+
+ __defrag_check_fat_old(sb, inode, chunk);
+ }
+
+ /**
+ * chunk->new_idx == 0 means this chunk already cleaned up
+ */
+ chunk->new_idx = 0;
+ chunk->stat = DFR_CHUNK_STAT_ERR;
+ }
+
+ chunk_prev = chunk;
+ }
+ BUG_ON(!mutex_is_locked(&ino_dfr->lock));
+ mutex_unlock(&ino_dfr->lock);
+ }
+
+out:
+ if (skip) {
+ dfr_debug("%s skipped (nr_reqs %d, done %d, skip %d)",
+ __func__, sb_dfr->nr_chunks - 1, done, skip);
+ } else {
+ /* Make dfr_reserved_clus zero */
+ if (sbi->dfr_reserved_clus > 0) {
+ if (fsi->reserved_clusters < sbi->dfr_reserved_clus) {
+ dfr_err("Reserved count: reserved_clus %d, dfr_reserved_clus %d",
+ fsi->reserved_clusters, sbi->dfr_reserved_clus);
+ BUG_ON(fsi->reserved_clusters < sbi->dfr_reserved_clus);
+ }
+
+ defrag_reserve_clusters(sb, 0 - sbi->dfr_reserved_clus);
+ }
+
+ dfr_debug("%s done (nr_reqs %d, done %d)", __func__, sb_dfr->nr_chunks - 1, done);
+ }
+}
+
+
+/**
+ * @fn defrag_update_fat_next
+ * @brief update FAT chain for defrag requests
+ * @return void
+ * @param sb super block
+ * @remark protected by super_block and volume lock
+ */
+void
+defrag_update_fat_next(
+ struct super_block *sb)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ struct defrag_info *sb_dfr = &sbi->dfr_info, *ino_dfr = NULL;
+ struct defrag_chunk_info *chunk = NULL;
+ int done = 0, i = 0, j = 0, err = 0;
+
+ /* Check if FS_ERROR occurred */
+ if (sb->s_flags & MS_RDONLY) {
+ dfr_err("RDONLY partition (err %d)", -EROFS);
+ goto out;
+ }
+
+ list_for_each_entry(ino_dfr, &sb_dfr->entry, entry) {
+
+ for (i = 0; i < ino_dfr->nr_chunks; i++) {
+ int skip = 0;
+
+ chunk = &(ino_dfr->chunks[i]);
+
+ /* Do nothing if error occurred or update_fat_next already passed */
+ if (chunk->stat == DFR_CHUNK_STAT_ERR)
+ continue;
+ if (chunk->stat & DFR_CHUNK_STAT_FAT) {
+ done++;
+ continue;
+ }
+
+ /* Skip this chunk if get_block not passed for this chunk */
+ for (j = 0; j < chunk->nr_clus; j++) {
+ if (sbi->dfr_new_clus[chunk->new_idx + j] == 0) {
+ skip = 1;
+ break;
+ }
+ }
+ if (skip)
+ continue;
+
+ /* Update chunk's next cluster: terminate the new chain with
+ * the chunk's recorded successor
+ */
+ FAT32_CHECK_CLUSTER(fsi,
+ sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], err);
+ BUG_ON(err);
+ if (fat_ent_set(sb,
+ sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1],
+ chunk->next_clus))
+ goto out;
+
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+ /* SPO test */
+ defrag_spo_test(sb, DFR_SPO_RANDOM, __func__);
+#endif
+
+ /* Update chunk's state */
+ chunk->stat |= DFR_CHUNK_STAT_FAT;
+ done++;
+ }
+ }
+
+out:
+ dfr_debug("%s done (nr_reqs %d, done %d)", __func__, sb_dfr->nr_chunks - 1, done);
+}
+
+
+/**
+ * @fn defrag_check_discard
+ * @brief check if we can send discard for this AU, if so, send discard
+ * @return void
+ * @param sb super block
+ * @remark protected by super_block and volume lock
+ */
+void
+defrag_check_discard(
+ IN struct super_block *sb)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
+ AU_INFO_T *au = NULL;
+ struct defrag_info *sb_dfr = &(SDFAT_SB(sb)->dfr_info);
+ /* AUs already discarded in this pass, to avoid duplicate commands */
+ unsigned int tmp[DFR_MAX_AU_MOVED];
+ int i = 0, j = 0;
+
+ BUG_ON(!amap);
+
+ if (!(SDFAT_SB(sb)->options.discard) ||
+ !(SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART))
+ return;
+
+ /* IMPROVED: sizeof(tmp) instead of sizeof(int) * N — same bytes, but
+ * robust against a future change of tmp's element type
+ */
+ memset(tmp, 0, sizeof(tmp));
+
+ for (i = REQ_HEADER_IDX + 1; i < sb_dfr->nr_chunks; i++) {
+ struct defrag_chunk_info *chunk = &(sb_dfr->chunks[i]);
+ int skip = 0;
+
+ au = GET_AU(amap, i_AU_of_CLU(amap, chunk->d_clus));
+
+ /* Send DISCARD for free AU */
+ if ((IS_AU_IGNORED(au, amap)) &&
+ (amap_get_freeclus(sb, chunk->d_clus) == CLUS_PER_AU(sb))) {
+ sector_t blk = 0, nr_blks = 0;
+ unsigned int au_align_factor = amap->option.au_align_factor % amap->option.au_size;
+
+ BUG_ON(au->idx == 0);
+
+ /* Avoid multiple DISCARD */
+ for (j = 0; j < DFR_MAX_AU_MOVED; j++) {
+ if (tmp[j] == au->idx) {
+ skip = 1;
+ break;
+ }
+ }
+ if (skip == 1)
+ continue;
+
+ /* Send DISCARD cmd for the AU's whole sector range */
+ blk = (sector_t) (((au->idx * CLUS_PER_AU(sb)) << fsi->sect_per_clus_bits)
+ - au_align_factor);
+ nr_blks = ((sector_t)CLUS_PER_AU(sb)) << fsi->sect_per_clus_bits;
+
+ /* BUGFIX: sector_t needs %llx (with u64 cast), not %zx */
+ dfr_debug("Send DISCARD for AU[%d] (blk %08llx)", au->idx, (u64)blk);
+ sb_issue_discard(sb, blk, nr_blks, GFP_NOFS, 0);
+
+ /* Save previous AU's index */
+ for (j = 0; j < DFR_MAX_AU_MOVED; j++) {
+ if (!tmp[j]) {
+ tmp[j] = au->idx;
+ break;
+ }
+ }
+ }
+ }
+}
+
+
+/**
+ * @fn defrag_free_cluster
+ * @brief free uneccessary cluster
+ * @return void
+ * @param sb super block
+ * @param clus physical cluster num
+ * @remark protected by super_block and volume lock
+ */
+int
+defrag_free_cluster(
+ struct super_block *sb,
+ unsigned int clus)
+{
+ FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
+ unsigned int val = 0;
+ s32 err = 0;
+
+ FAT32_CHECK_CLUSTER(fsi, clus, err);
+ BUG_ON(err);
+
+ if (fat_ent_get(sb, clus, &val))
+ return -EIO;
+
+ /* Freeing an already-free cluster is a fatal accounting bug */
+ if (!val) {
+ dfr_err("Free: Already freed, clus %08x, val %08x", clus, val);
+ BUG_ON(!val);
+ } else if (fat_ent_set(sb, clus, 0)) {
+ return -EIO;
+ }
+
+ /* Book-keeping: volume dirtied, one fewer used cluster, AU updated */
+ set_sb_dirty(sb);
+ fsi->used_clusters--;
+ if (fsi->amap)
+ amap_release_cluster(sb, clus);
+
+ return 0;
+}
+
+
+/**
+ * @fn defrag_check_defrag_required
+ * @brief check if defrag required
+ * @return 1 if required, 0 otherwise
+ * @param sb super block
+ * @param totalau # of total AUs
+ * @param cleanau # of clean AUs
+ * @param fullau # of full AUs
+ * @remark protected by super_block
+ */
+int
+defrag_check_defrag_required(
+ IN struct super_block *sb,
+ OUT int *totalau,
+ OUT int *cleanau,
+ OUT int *fullau)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ AMAP_T *amap = NULL;
+ int clean_ratio = 0, frag_ratio = 0;
+ int ret = 0;
+
+ if (!sb || !(SDFAT_SB(sb)->options.defrag))
+ return 0;
+
+ /* Check DFR_DEFAULT_STOP_RATIO first: defrag is pointless when the
+ * volume is nearly full. ~0 means the count is not cached yet.
+ */
+ fsi = &(SDFAT_SB(sb)->fsi);
+ if (fsi->used_clusters == (unsigned int)(~0)) {
+ if (fsi->fs_func->count_used_clusters(sb, &fsi->used_clusters))
+ return -EIO;
+ }
+ if (fsi->used_clusters * DFR_FULL_RATIO >= fsi->num_clusters * DFR_DEFAULT_STOP_RATIO) {
+ dfr_debug("used_clusters %d, num_clusters %d", fsi->used_clusters, fsi->num_clusters);
+ return 0;
+ }
+
+ /* Check clean/frag ratio */
+ amap = SDFAT_SB(sb)->fsi.amap;
+ BUG_ON(!amap);
+
+ clean_ratio = (amap->n_clean_au * 100) / amap->n_au;
+ /* NOTE(review): the else branch divides by used_clusters * CLUS_PER_AU;
+ * if used_clusters can be 0 here this is a division by zero — confirm
+ * the STOP_RATIO guard above excludes an empty volume.
+ */
+ if (amap->n_full_au)
+ frag_ratio = ((amap->n_au - amap->n_clean_au) * 100) / amap->n_full_au;
+ else
+ frag_ratio = ((amap->n_au - amap->n_clean_au) * 100) /
+ (fsi->used_clusters * CLUS_PER_AU(sb));
+
+ /*
+ * Wake-up defrag_daemon:
+ * when # of clean AUs too small, or frag_ratio exceeds the limit
+ */
+ if ((clean_ratio < DFR_DEFAULT_WAKEUP_RATIO) ||
+ ((clean_ratio < DFR_DEFAULT_CLEAN_RATIO) && (frag_ratio >= DFR_DEFAULT_FRAG_RATIO))) {
+
+ if (totalau)
+ *totalau = amap->n_au;
+ if (cleanau)
+ *cleanau = amap->n_clean_au;
+ if (fullau)
+ *fullau = amap->n_full_au;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+
+/**
+ * @fn defrag_check_defrag_on
+ * @brief check defrag status on inode
+ * @return 1 if defrag is on, 0 otherwise
+ * @param inode inode
+ * @param start logical start addr
+ * @param end logical end addr
+ * @param cancel flag to cancel defrag
+ * @param caller caller info
+ */
+int
+defrag_check_defrag_on(
+	INOUT struct inode *inode,
+	IN loff_t start,
+	IN loff_t end,
+	IN int cancel,
+	IN const char *caller)
+{
+	struct super_block *sb;
+	FS_INFO_T *fsi;
+	struct defrag_info *ino_dfr;
+	unsigned int clus_start = 0, clus_end = 0;
+	int ret = 0, i = 0;
+
+	/* Validate inode *before* dereferencing it (inode->i_sb and
+	 * SDFAT_I(inode) must not be read ahead of this check)
+	 */
+	if (!inode || (start == end))
+		return 0;
+
+	sb = inode->i_sb;
+	fsi = &(SDFAT_SB(sb)->fsi);
+	ino_dfr = &(SDFAT_I(inode)->dfr_info);
+
+	mutex_lock(&ino_dfr->lock);
+	/* Check if this inode is on defrag */
+	if (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ) {
+
+		/* Convert the byte range to a cluster range (end rounded up) */
+		clus_start = start >> (fsi->cluster_size_bits);
+		clus_end = (end >> (fsi->cluster_size_bits)) +
+			((end & (fsi->cluster_size - 1)) ? 1 : 0);
+
+		if (!ino_dfr->chunks)
+			goto error;
+
+		/* Check each chunk in given inode */
+		for (i = 0; i < ino_dfr->nr_chunks; i++) {
+			struct defrag_chunk_info *chunk = &(ino_dfr->chunks[i]);
+			unsigned int chunk_start = 0, chunk_end = 0;
+
+			/* Skip this chunk when error occurred or it already passed defrag process */
+			if ((chunk->stat == DFR_CHUNK_STAT_ERR) || (chunk->stat == DFR_CHUNK_STAT_PASS))
+				continue;
+
+			chunk_start = chunk->f_clus;
+			chunk_end = chunk->f_clus + chunk->nr_clus;
+
+			/* Overlap test: range starts inside the chunk, ends inside
+			 * it, or fully covers it
+			 */
+			if (((clus_start >= chunk_start) && (clus_start < chunk_end)) ||
+				((clus_end > chunk_start) && (clus_end <= chunk_end)) ||
+				((clus_start < chunk_start) && (clus_end > chunk_end))) {
+				ret = 1;
+				if (cancel) {
+					chunk->stat = DFR_CHUNK_STAT_ERR;
+					dfr_debug("Defrag canceled: inode %p, start %08x, end %08x, caller %s",
+							inode, clus_start, clus_end, caller);
+				}
+			}
+		}
+	}
+
+error:
+	BUG_ON(!mutex_is_locked(&ino_dfr->lock));
+	mutex_unlock(&ino_dfr->lock);
+	return ret;
+}
+
+
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+/**
+ * @fn defrag_spo_test
+ * @brief trigger a sudden-power-off test panic while defrag is running
+ * @return void
+ * @param sb super block
+ * @param flag SPO debug flag
+ * @param caller caller info
+ */
+void
+defrag_spo_test(
+	struct super_block *sb,
+	int flag,
+	const char *caller)
+{
+	/* Validate sb *before* SDFAT_SB(sb) dereferences it */
+	if (!sb || !(SDFAT_SB(sb)->options.defrag))
+		return;
+
+	/* Panic on a matching flag to simulate a sudden power-off */
+	if (flag == SDFAT_SB(sb)->dfr_spo_flag) {
+		dfr_err("Defrag SPO test (flag %d, caller %s)", flag, caller);
+		panic("Defrag SPO test");
+	}
+}
+#endif /* CONFIG_SDFAT_DFR_DEBUG */
+
+
+#endif /* CONFIG_SDFAT_DFR */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDFAT_DEFRAG_H
+#define _SDFAT_DEFRAG_H
+
+#ifdef CONFIG_SDFAT_DFR
+
+/* Tuning parameters */
+/* All *_RATIO values below are percentages (see DFR_FULL_RATIO = 100) */
+#define DFR_MIN_TIMEOUT (1 * HZ) // Minimum timeout for forced-sync
+#define DFR_DEFAULT_TIMEOUT (10 * HZ) // Default timeout for forced-sync
+
+#define DFR_DEFAULT_CLEAN_RATIO (50) // Wake-up daemon when clean AU ratio under 50%
+#define DFR_DEFAULT_WAKEUP_RATIO (10) // Wake-up daemon when clean AU ratio under 10%, regardless of frag_ratio
+
+#define DFR_DEFAULT_FRAG_RATIO (130) // Wake-up daemon when frag_ratio over 130%
+
+#define DFR_DEFAULT_PACKING_RATIO (10) // Call allocator with PACKING flag, when clean AU ratio under 10%
+
+#define DFR_DEFAULT_STOP_RATIO (98) // Stop defrag_daemon when disk used ratio over 98%
+#define DFR_FULL_RATIO (100)
+
+#define DFR_MAX_AU_MOVED (16) // Maximum # of AUs for a request
+
+
+/* Debugging support*/
+#define dfr_err(fmt, args...) pr_err("DFR: " fmt "\n", args)
+
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+#define dfr_debug(fmt, args...) pr_debug("DFR: " fmt "\n", args)
+#else
+#define dfr_debug(fmt, args...)
+#endif
+
+
+/* Error handling */
+#define ERR_HANDLE(err) { \
+ if (err) { \
+ dfr_debug("err %d", err); \
+ goto error; \
+ } \
+}
+
+#define ERR_HANDLE2(cond, err, val) { \
+ if (cond) { \
+ err = val; \
+ dfr_debug("err %d", err); \
+ goto error; \
+ } \
+}
+
+
+/* Arguments IN-OUT: documentation-only parameter direction annotations;
+ * they expand to nothing and have no effect on code generation. */
+#define IN
+#define OUT
+#define INOUT
+
+
+/* Macros */
+/* Extract/replace 32-bit halves of 64-bit values and 16-bit halves of
+ * 32-bit values.
+ * NOTE(review): the *_LO/SET64_HI forms shift signed loff_t operands;
+ * left-shifting a negative signed value is undefined behavior - confirm
+ * operands are always non-negative (masking would be safer). */
+#define GET64_HI(var64) ((unsigned int)((var64) >> 32))
+#define GET64_LO(var64) ((unsigned int)(((var64) << 32) >> 32))
+#define SET64_HI(dst64, var32) { (dst64) = ((loff_t)(var32) << 32) | ((dst64) & 0x00000000ffffffffLL); }
+#define SET64_LO(dst64, var32) { (dst64) = ((dst64) & 0xffffffff00000000LL) | ((var32) & 0x00000000ffffffffLL); }
+
+#define GET32_HI(var32) ((unsigned short)((var32) >> 16))
+#define GET32_LO(var32) ((unsigned short)(((var32) << 16) >> 16))
+#define SET32_HI(dst32, var16) { (dst32) = ((unsigned int)(var16) << 16) | ((dst32) & 0x0000ffff); }
+#define SET32_LO(dst32, var16) { (dst32) = ((dst32) & 0xffff0000) | ((unsigned int)(var16) & 0x0000ffff); }
+
+
+/* FAT32 related */
+#define FAT32_EOF (0x0fffffff)
+#define FAT32_RESERVED (0x0ffffff7)
+#define FAT32_UNUSED_CLUS (2)
+
+/* Clusters/pages per allocation unit, derived from the amap mount option */
+#define CLUS_PER_AU(sb) ( \
+ (SDFAT_SB(sb)->options.amap_opt.sect_per_au) >> (SDFAT_SB(sb)->fsi.sect_per_clus_bits) \
+)
+#define PAGES_PER_AU(sb) ( \
+ ((SDFAT_SB(sb)->options.amap_opt.sect_per_au) << ((sb)->s_blocksize_bits)) \
+ >> PAGE_SHIFT \
+)
+#define PAGES_PER_CLUS(sb) ((SDFAT_SB(sb)->fsi.cluster_size) >> PAGE_SHIFT)
+
+/* Validate a FAT32 cluster number: sets err to -EINVAL (and logs) when
+ * clus is below the first data cluster, above num_clusters, or in the
+ * reserved range; err is 0 otherwise.
+ * NOTE(review): the upper bound uses '> num_clusters' - confirm whether
+ * num_clusters is a count (then '>=' would be the correct test) or the
+ * highest valid index. */
+#define FAT32_CHECK_CLUSTER(fsi, clus, err) \
+ { \
+ if (((clus) < FAT32_UNUSED_CLUS) || \
+ ((clus) > (fsi)->num_clusters) || \
+ ((clus) >= FAT32_RESERVED)) { \
+ dfr_err("clus %08x, fsi->num_clusters %08x", (clus), (fsi)->num_clusters); \
+ err = -EINVAL; \
+ } else { \
+ err = 0; \
+ } \
+ }
+
+
+/* IOCTL_DFR_INFO */
+/* Volume geometry reported to the userspace defrag tool */
+struct defrag_info_arg {
+ /* PBS info */
+ unsigned int sec_sz;
+ unsigned int clus_sz;
+ unsigned long long total_sec;
+ unsigned long long fat_offset_sec;
+ unsigned int fat_sz_sec;
+ unsigned int n_fat;
+ unsigned int hidden_sectors;
+
+ /* AU info */
+ unsigned int sec_per_au;
+};
+
+
+/* IOC_DFR_TRAV */
+#define DFR_TRAV_HEADER_IDX (0)
+
+#define DFR_TRAV_TYPE_HEADER (0x0000000F)
+#define DFR_TRAV_TYPE_DIR (1)
+#define DFR_TRAV_TYPE_FILE (2)
+#define DFR_TRAV_TYPE_TEST (DFR_TRAV_TYPE_HEADER | 0x10000000)
+
+#define DFR_TRAV_ROOT_IPOS (0xFFFFFFFFFFFFFFFFLL)
+
+/* One directory entry in the traversal buffer shared with userspace */
+struct defrag_trav_arg {
+ int type;
+ unsigned int start_clus;
+ loff_t i_pos;
+ char name[MAX_DOSNAME_BUF_SIZE];
+ char dummy1;
+ int dummy2;
+};
+
+/* Traversal status codes reported in defrag_trav_header.stat */
+#define DFR_TRAV_STAT_DONE (0x1)
+#define DFR_TRAV_STAT_MORE (0x2)
+#define DFR_TRAV_STAT_ERR (0xFF)
+
+/* Element DFR_TRAV_HEADER_IDX of the traversal buffer.
+ * NOTE(review): appears intentionally layout-compatible with
+ * struct defrag_trav_arg (same leading fields) - confirm before
+ * reordering fields in either struct. */
+struct defrag_trav_header {
+ int type;
+ unsigned int start_clus;
+ loff_t i_pos;
+ char name[MAX_DOSNAME_BUF_SIZE];
+ char stat;
+ unsigned int nr_entries;
+};
+
+
+/* IOC_DFR_REQ */
+#define REQ_HEADER_IDX (0)
+
+/* Per-chunk defrag state machine values (chunk->stat) */
+#define DFR_CHUNK_STAT_ERR (0xFFFFFFFF)
+#define DFR_CHUNK_STAT_REQ (0x1)
+#define DFR_CHUNK_STAT_WB (0x2)
+#define DFR_CHUNK_STAT_FAT (0x4)
+#define DFR_CHUNK_STAT_PREP (DFR_CHUNK_STAT_REQ | DFR_CHUNK_STAT_WB | DFR_CHUNK_STAT_FAT)
+#define DFR_CHUNK_STAT_PASS (0x0000000F)
+
+/* Element REQ_HEADER_IDX of the request buffer shared with userspace */
+struct defrag_chunk_header {
+ int mode;
+ unsigned int nr_chunks;
+ loff_t dummy1;
+ int dummy2[4];
+ union {
+ int *dummy3;
+ int dummy4;
+ };
+ int dummy5;
+};
+
+/* One defrag work unit: a contiguous run of file clusters to relocate */
+struct defrag_chunk_info {
+ int stat;
+ /* File related */
+ unsigned int f_clus;
+ loff_t i_pos;
+ /* Cluster related */
+ unsigned int d_clus;
+ unsigned int nr_clus;
+ unsigned int prev_clus;
+ unsigned int next_clus;
+ union {
+ void *dummy;
+ /* req status */
+ unsigned int new_idx;
+ };
+ /* AU related */
+ unsigned int au_clus;
+};
+
+
+/* Global info */
+/* Defrag operating modes (ioctl argument) */
+#define DFR_MODE_BACKGROUND (0x1)
+#define DFR_MODE_FOREGROUND (0x2)
+#define DFR_MODE_ONESHOT (0x4)
+#define DFR_MODE_BATCHED (0x8)
+#define DFR_MODE_TEST (DFR_MODE_BACKGROUND | 0x10000000)
+
+/* Per-superblock defrag state (defrag_info.stat) */
+#define DFR_SB_STAT_IDLE (0)
+#define DFR_SB_STAT_REQ (1)
+#define DFR_SB_STAT_VALID (2)
+
+/* Per-inode defrag state (defrag_info.stat) */
+#define DFR_INO_STAT_IDLE (0)
+#define DFR_INO_STAT_REQ (1)
+/* Per-sb / per-inode defrag bookkeeping; 'lock' guards the chunk array */
+struct defrag_info {
+ struct mutex lock;
+ atomic_t stat;
+ struct defrag_chunk_info *chunks;
+ unsigned int nr_chunks;
+ struct list_head entry;
+};
+
+
+/* SPO test flags */
+#define DFR_SPO_NONE (0)
+#define DFR_SPO_NORMAL (1)
+#define DFR_SPO_DISCARD (2)
+#define DFR_SPO_FAT_NEXT (3)
+#define DFR_SPO_RANDOM (4)
+
+
+/* Extern functions */
+/* Implemented in dfr.c; see each definition for locking requirements */
+int defrag_get_info(struct super_block *sb, struct defrag_info_arg *arg);
+
+int defrag_scan_dir(struct super_block *sb, struct defrag_trav_arg *arg);
+
+int defrag_validate_cluster(struct inode *inode, struct defrag_chunk_info *chunk, int skip_prev);
+int defrag_reserve_clusters(struct super_block *sb, int nr_clus);
+int defrag_mark_ignore(struct super_block *sb, unsigned int clus);
+void defrag_unmark_ignore_all(struct super_block *sb);
+
+int defrag_map_cluster(struct inode *inode, unsigned int clu_offset, unsigned int *clu);
+void defrag_writepage_end_io(struct page *page);
+
+void defrag_update_fat_prev(struct super_block *sb, int force);
+void defrag_update_fat_next(struct super_block *sb);
+void defrag_check_discard(struct super_block *sb);
+int defrag_free_cluster(struct super_block *sb, unsigned int clus);
+
+int defrag_check_defrag_required(struct super_block *sb, int *totalau, int *cleanau, int *fullau);
+int defrag_check_defrag_on(struct inode *inode, loff_t start, loff_t end, int cancel, const char *caller);
+
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+void defrag_spo_test(struct super_block *sb, int flag, const char *caller);
+#endif
+
+#endif /* CONFIG_SDFAT_DFR */
+
+#endif /* _SDFAT_DEFRAG_H */
+
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * linux/fs/fat/cache.c
+ *
+ * Written 1992,1993 by Werner Almesberger
+ *
+ * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
+ * of inode number.
+ * May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : extent.c */
+/* PURPOSE : Improve the performance of traversing fat chain. */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <linux/slab.h>
+#include "sdfat.h"
+#include "core.h"
+
+#define EXTENT_CACHE_VALID 0
+/* this must be > 0. */
+#define EXTENT_MAX_CACHE 16
+
+struct extent_cache {
+ struct list_head cache_list;
+ u32 nr_contig; /* number of contiguous clusters */
+ u32 fcluster; /* cluster number in the file. */
+ u32 dcluster; /* cluster number on disk. */
+};
+
+struct extent_cache_id {
+ u32 id;
+ u32 nr_contig;
+ u32 fcluster;
+ u32 dcluster;
+};
+
+static struct kmem_cache *extent_cache_cachep;
+
+/* Slab constructor for extent_cache objects: start with an empty LRU link */
+static void init_once(void *c)
+{
+	INIT_LIST_HEAD(&((struct extent_cache *)c)->cache_list);
+}
+
+/* Create the global slab cache for extent_cache objects.
+ * Returns 0 on success, -ENOMEM if the cache cannot be created.
+ */
+s32 extent_cache_init(void)
+{
+ extent_cache_cachep = kmem_cache_create("sdfat_extent_cache",
+ sizeof(struct extent_cache),
+ 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
+ init_once);
+ if (!extent_cache_cachep)
+ return -ENOMEM;
+ return 0;
+}
+
+/* Destroy the slab cache created by extent_cache_init(), if present */
+void extent_cache_shutdown(void)
+{
+	if (extent_cache_cachep)
+		kmem_cache_destroy(extent_cache_cachep);
+}
+
+/* Reset the per-inode extent cache to an empty, valid state */
+void extent_cache_init_inode(struct inode *inode)
+{
+	EXTENT_T *ext = &(SDFAT_I(inode)->fid.extent);
+
+	ext->nr_caches = 0;
+	ext->cache_valid_id = EXTENT_CACHE_VALID + 1;
+	INIT_LIST_HEAD(&ext->cache_lru);
+	spin_lock_init(&ext->cache_lru_lock);
+}
+
+/* Allocate one extent_cache object from the slab. GFP_NOFS: may be
+ * called with filesystem locks held. Returns NULL on failure. */
+static inline struct extent_cache *extent_cache_alloc(void)
+{
+ return kmem_cache_alloc(extent_cache_cachep, GFP_NOFS);
+}
+
+/* Return an extent_cache object to the slab; it must already be
+ * unlinked from any LRU list (enforced by the BUG_ON). */
+static inline void extent_cache_free(struct extent_cache *cache)
+{
+ BUG_ON(!list_empty(&cache->cache_list));
+ kmem_cache_free(extent_cache_cachep, cache);
+}
+
+/* Move @cache to the front (most-recently-used end) of the inode's LRU
+ * list unless it is already there. Caller holds cache_lru_lock. */
+static inline void extent_cache_update_lru(struct inode *inode,
+ struct extent_cache *cache)
+{
+ EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);
+
+ if (extent->cache_lru.next != &cache->cache_list)
+ list_move(&cache->cache_list, &extent->cache_lru);
+}
+
+/* Find the cached extent covering (or nearest below) file cluster
+ * @fclus. On a usable hit the extent is snapshotted into @cid, bumped
+ * in the LRU, and the offset of the best reachable cluster within the
+ * extent is returned, with the matching file/disk cluster numbers in
+ * *cached_fclus / *cached_dclus.
+ * Returns CLUS_EOF when no cached extent starts at or below @fclus.
+ */
+static u32 extent_cache_lookup(struct inode *inode, u32 fclus,
+ struct extent_cache_id *cid,
+ u32 *cached_fclus, u32 *cached_dclus)
+{
+ EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);
+
+ /* Read-only sentinel (fcluster == 0); never written through, so
+ * sharing this function-local static across inodes is safe. */
+ static struct extent_cache nohit = { .fcluster = 0, };
+
+ struct extent_cache *hit = &nohit, *p;
+ u32 offset = CLUS_EOF;
+
+ spin_lock(&extent->cache_lru_lock);
+ list_for_each_entry(p, &extent->cache_lru, cache_list) {
+ /* Find the cache of "fclus" or nearest cache. */
+ if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
+ hit = p;
+ if ((hit->fcluster + hit->nr_contig) < fclus) {
+ /* fclus is beyond this extent: remember its length */
+ offset = hit->nr_contig;
+ } else {
+ /* fclus lies inside this extent: exact answer */
+ offset = fclus - hit->fcluster;
+ break;
+ }
+ }
+ }
+ if (hit != &nohit) {
+ extent_cache_update_lru(inode, hit);
+
+ cid->id = extent->cache_valid_id;
+ cid->nr_contig = hit->nr_contig;
+ cid->fcluster = hit->fcluster;
+ cid->dcluster = hit->dcluster;
+ *cached_fclus = cid->fcluster + offset;
+ *cached_dclus = cid->dcluster + offset;
+ }
+ spin_unlock(&extent->cache_lru_lock);
+
+ return offset;
+}
+
+/* If an extent starting at new->fcluster is already cached, widen it to
+ * at least new->nr_contig and return it; otherwise return NULL.
+ * Caller holds cache_lru_lock. */
+static struct extent_cache *extent_cache_merge(struct inode *inode,
+ struct extent_cache_id *new)
+{
+ EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);
+
+ struct extent_cache *p;
+
+ list_for_each_entry(p, &extent->cache_lru, cache_list) {
+ /* Find the same part as "new" in cluster-chain. */
+ if (p->fcluster == new->fcluster) {
+ ASSERT(p->dcluster == new->dcluster);
+ if (new->nr_contig > p->nr_contig)
+ p->nr_contig = new->nr_contig;
+ return p;
+ }
+ }
+ return NULL;
+}
+
+/* Insert (or merge) the extent described by @new into the inode's
+ * cache. The slab allocation happens with the lock dropped, so the
+ * merge is re-checked afterwards; when the cache is full the LRU tail
+ * entry is recycled instead of allocating. @new is discarded if an
+ * invalidation bumped cache_valid_id since it was captured. */
+static void extent_cache_add(struct inode *inode, struct extent_cache_id *new)
+{
+ EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);
+
+ struct extent_cache *cache, *tmp;
+
+ if (new->fcluster == -1) /* dummy cache */
+ return;
+
+ spin_lock(&extent->cache_lru_lock);
+ if (new->id != EXTENT_CACHE_VALID &&
+ new->id != extent->cache_valid_id)
+ goto out; /* this cache was invalidated */
+
+ cache = extent_cache_merge(inode, new);
+ if (cache == NULL) {
+ if (extent->nr_caches < EXTENT_MAX_CACHE) {
+ extent->nr_caches++;
+ /* drop the spinlock for the sleeping allocation */
+ spin_unlock(&extent->cache_lru_lock);
+
+ tmp = extent_cache_alloc();
+ if (!tmp) {
+ spin_lock(&extent->cache_lru_lock);
+ extent->nr_caches--;
+ spin_unlock(&extent->cache_lru_lock);
+ return;
+ }
+
+ spin_lock(&extent->cache_lru_lock);
+ /* someone may have merged @new while the lock was dropped */
+ cache = extent_cache_merge(inode, new);
+ if (cache != NULL) {
+ extent->nr_caches--;
+ extent_cache_free(tmp);
+ goto out_update_lru;
+ }
+ cache = tmp;
+ } else {
+ /* cache full: recycle the least-recently-used entry */
+ struct list_head *p = extent->cache_lru.prev;
+ cache = list_entry(p, struct extent_cache, cache_list);
+ }
+ cache->fcluster = new->fcluster;
+ cache->dcluster = new->dcluster;
+ cache->nr_contig = new->nr_contig;
+ }
+out_update_lru:
+ extent_cache_update_lru(inode, cache);
+out:
+ spin_unlock(&extent->cache_lru_lock);
+}
+
+/*
+ * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
+ * fixes itself after a while.
+ */
+/* Free every cached extent of @inode and bump the validity id so
+ * in-flight extent_cache_id snapshots are rejected by extent_cache_add.
+ * Caller holds cache_lru_lock. */
+static void __extent_cache_inval_inode(struct inode *inode)
+{
+ EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);
+ struct extent_cache *cache;
+
+ while (!list_empty(&extent->cache_lru)) {
+ cache = list_entry(extent->cache_lru.next,
+ struct extent_cache, cache_list);
+ list_del_init(&cache->cache_list);
+ extent->nr_caches--;
+ extent_cache_free(cache);
+ }
+ /* Update. The copy of caches before this id is discarded. */
+ extent->cache_valid_id++;
+ if (extent->cache_valid_id == EXTENT_CACHE_VALID)
+ extent->cache_valid_id++; /* skip the reserved "always valid" id */
+}
+
+/* Locked wrapper around __extent_cache_inval_inode() */
+void extent_cache_inval_inode(struct inode *inode)
+{
+	EXTENT_T *ext = &(SDFAT_I(inode)->fid.extent);
+
+	spin_lock(&ext->cache_lru_lock);
+	__extent_cache_inval_inode(inode);
+	spin_unlock(&ext->cache_lru_lock);
+}
+
+/* Extend the candidate run by one cluster and report whether @dclus
+ * continues it contiguously on disk */
+static inline s32 cache_contiguous(struct extent_cache_id *cid, u32 dclus)
+{
+	cid->nr_contig++;
+	if (cid->dcluster + cid->nr_contig == dclus)
+		return 1;
+	return 0;
+}
+
+/* Start a fresh candidate extent at file cluster @fclus -> disk
+ * cluster @dclus, with zero contiguous followers so far */
+static inline void cache_init(struct extent_cache_id *cid, u32 fclus, u32 dclus)
+{
+	cid->nr_contig = 0;
+	cid->fcluster = fclus;
+	cid->dcluster = dclus;
+	cid->id = EXTENT_CACHE_VALID;
+}
+
+/* Resolve file-relative cluster @cluster of @inode to its on-disk
+ * cluster by walking the FAT chain, using/refreshing the per-inode
+ * extent cache to shorten the walk.
+ * Outputs: *fclus = file cluster actually reached, *dclus = its disk
+ * cluster, *last_dclus = the previous disk cluster in the chain.
+ * If @allow_eof is 0, hitting EOF before @cluster is treated as chain
+ * corruption; otherwise the walk simply stops at EOF.
+ * Returns 0 on success, -EIO on FAT errors or a detected chain loop.
+ */
+s32 extent_get_clus(struct inode *inode, u32 cluster, u32 *fclus,
+ u32 *dclus, u32 *last_dclus, s32 allow_eof)
+{
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ u32 limit = fsi->num_clusters;
+ FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
+ struct extent_cache_id cid;
+ u32 content;
+
+ /* FOR GRACEFUL ERROR HANDLING */
+ if (IS_CLUS_FREE(fid->start_clu)) {
+ sdfat_fs_error(sb, "invalid access to "
+ "extent cache (entry 0x%08x)", fid->start_clu);
+ ASSERT(0);
+ return -EIO;
+ }
+
+ *fclus = 0;
+ *dclus = fid->start_clu;
+ *last_dclus = *dclus;
+
+ /*
+ * Don`t use extent_cache if zero offset or non-cluster allocation
+ */
+ if ((cluster == 0) || IS_CLUS_EOF(*dclus))
+ return 0;
+
+ cache_init(&cid, CLUS_EOF, CLUS_EOF);
+
+ if (extent_cache_lookup(inode, cluster, &cid, fclus, dclus) == CLUS_EOF) {
+ /*
+ * dummy, always not contiguous
+ * This is reinitialized by cache_init(), later.
+ */
+ ASSERT((cid.id == EXTENT_CACHE_VALID)
+ && (cid.fcluster == CLUS_EOF)
+ && (cid.dcluster == CLUS_EOF)
+ && (cid.nr_contig == 0));
+ }
+
+ /* cache hit landed exactly on the target: done */
+ if (*fclus == cluster)
+ return 0;
+
+ while (*fclus < cluster) {
+ /* prevent the infinite loop of cluster chain */
+ if (*fclus > limit) {
+ sdfat_fs_error(sb,
+ "%s: detected the cluster chain loop"
+ " (i_pos %u)", __func__,
+ (*fclus));
+ return -EIO;
+ }
+
+ if (fat_ent_get_safe(sb, *dclus, &content))
+ return -EIO;
+
+ *last_dclus = *dclus;
+ *dclus = content;
+ (*fclus)++;
+
+ if (IS_CLUS_EOF(content)) {
+ if (!allow_eof) {
+ sdfat_fs_error(sb,
+ "%s: invalid cluster chain (i_pos %u,"
+ "last_clus 0x%08x is EOF)",
+ __func__, *fclus, (*last_dclus));
+ return -EIO;
+ }
+
+ break;
+ }
+
+ /* extend the current cached run, or start a new one */
+ if (!cache_contiguous(&cid, *dclus))
+ cache_init(&cid, *fclus, *dclus);
+ }
+
+ /* remember what we learned for the next lookup */
+ extent_cache_add(inode, &cid);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : fatent.c */
+/* PURPOSE : sdFAT FAT entry manager */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <asm/unaligned.h>
+
+#include "sdfat.h"
+#include "core.h"
+
+/*----------------------------------------------------------------------*/
+/* Global Variable Definitions */
+/*----------------------------------------------------------------------*/
+/* All buffer structures are protected w/ fsi->v_sem */
+
+/*----------------------------------------------------------------------*/
+/* Static functions */
+/*----------------------------------------------------------------------*/
+
+/*======================================================================*/
+/* FAT Read/Write Functions */
+/*======================================================================*/
+/* in : sb, loc
+ * out: content
+ * returns 0 on success, -EIO on error
+ */
+/* Read the 32-bit exFAT FAT entry at index @loc; values >= 0xFFFFFFF8
+ * are collapsed to CLUS_EOF. Returns 0 on success, -EIO on cache miss. */
+static s32 exfat_ent_get(struct super_block *sb, u32 loc, u32 *content)
+{
+ u32 off, _content;
+ u64 sec;
+ u8 *fat_sector;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ /* fsi->vol_type == EXFAT */
+ /* 4 bytes per entry: sector = loc / (blocksize/4), off = byte offset */
+ sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-2));
+ off = (loc << 2) & (u32)(sb->s_blocksize - 1);
+
+ fat_sector = fcache_getblk(sb, sec);
+ if (!fat_sector)
+ return -EIO;
+
+ _content = le32_to_cpu(*(__le32 *)(&fat_sector[off]));
+
+ /* remap reserved clusters to simplify code */
+ if (_content >= CLUSTER_32(0xFFFFFFF8))
+ _content = CLUS_EOF;
+
+ *content = CLUSTER_32(_content);
+ return 0;
+}
+
+/* Write a full 32-bit exFAT FAT entry at index @loc and mark the cached
+ * FAT sector dirty. Returns 0 on success, -EIO on cache failure. */
+static s32 exfat_ent_set(struct super_block *sb, u32 loc, u32 content)
+{
+ u32 off;
+ u64 sec;
+ u8 *fat_sector;
+ __le32 *fat_entry;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-2));
+ off = (loc << 2) & (u32)(sb->s_blocksize - 1);
+
+ fat_sector = fcache_getblk(sb, sec);
+ if (!fat_sector)
+ return -EIO;
+
+ fat_entry = (__le32 *)&(fat_sector[off]);
+ *fat_entry = cpu_to_le32(content);
+
+ return fcache_modify(sb, sec);
+}
+
+/* FAT32 entries are 32 bits but only the low 28 are significant; the
+ * top 4 bits are reserved and must be preserved on writes. */
+#define FATENT_FAT32_VALID_MASK (0x0FFFFFFFU)
+#define FATENT_FAT32_IGNORE_MASK (0xF0000000U)
+/* Read the FAT32 entry at @loc, masking the reserved bits and mapping
+ * bad/EOF markers to CLUS_BAD/CLUS_EOF. Returns 0 or -EIO. */
+static s32 fat32_ent_get(struct super_block *sb, u32 loc, u32 *content)
+{
+ u32 off, _content;
+ u64 sec;
+ u8 *fat_sector;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-2));
+ off = (loc << 2) & (u32)(sb->s_blocksize - 1);
+
+ fat_sector = fcache_getblk(sb, sec);
+ if (!fat_sector)
+ return -EIO;
+
+ _content = le32_to_cpu(*(__le32 *)(&fat_sector[off]));
+ _content &= FATENT_FAT32_VALID_MASK;
+
+ /* remap reserved clusters to simplify code */
+ if (_content == CLUSTER_32(0x0FFFFFF7U))
+ _content = CLUS_BAD;
+ else if (_content >= CLUSTER_32(0x0FFFFFF8U))
+ _content = CLUS_EOF;
+
+ *content = CLUSTER_32(_content);
+ return 0;
+}
+
+/* Write the low 28 bits of the FAT32 entry at @loc, preserving the
+ * reserved top 4 bits already on disk. Returns 0 or -EIO. */
+static s32 fat32_ent_set(struct super_block *sb, u32 loc, u32 content)
+{
+ u32 off;
+ u64 sec;
+ u8 *fat_sector;
+ __le32 *fat_entry;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ content &= FATENT_FAT32_VALID_MASK;
+
+ sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-2));
+ off = (loc << 2) & (u32)(sb->s_blocksize - 1);
+
+ fat_sector = fcache_getblk(sb, sec);
+ if (!fat_sector)
+ return -EIO;
+
+ fat_entry = (__le32 *)&(fat_sector[off]);
+ /* keep the reserved bits currently stored on disk */
+ content |= (le32_to_cpu(*fat_entry) & FATENT_FAT32_IGNORE_MASK);
+ *fat_entry = cpu_to_le32(content);
+
+ return fcache_modify(sb, sec);
+}
+
+#define FATENT_FAT16_VALID_MASK (0x0000FFFFU)
+/* Read the 16-bit FAT16 entry at @loc, mapping bad/EOF markers to
+ * CLUS_BAD/CLUS_EOF. Returns 0 or -EIO. */
+static s32 fat16_ent_get(struct super_block *sb, u32 loc, u32 *content)
+{
+ u32 off, _content;
+ u64 sec;
+ u8 *fat_sector;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ /* 2 bytes per entry */
+ sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-1));
+ off = (loc << 1) & (u32)(sb->s_blocksize - 1);
+
+ fat_sector = fcache_getblk(sb, sec);
+ if (!fat_sector)
+ return -EIO;
+
+ _content = (u32)le16_to_cpu(*(__le16 *)(&fat_sector[off]));
+ _content &= FATENT_FAT16_VALID_MASK;
+
+ /* remap reserved clusters to simplify code */
+ if (_content == CLUSTER_16(0xFFF7U))
+ _content = CLUS_BAD;
+ else if (_content >= CLUSTER_16(0xFFF8U))
+ _content = CLUS_EOF;
+
+ *content = CLUSTER_32(_content);
+ return 0;
+}
+
+/* Write the 16-bit FAT16 entry at @loc and mark the cached FAT sector
+ * dirty. Returns 0 or -EIO. */
+static s32 fat16_ent_set(struct super_block *sb, u32 loc, u32 content)
+{
+ u32 off;
+ u64 sec;
+ u8 *fat_sector;
+ __le16 *fat_entry;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ content &= FATENT_FAT16_VALID_MASK;
+
+ sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-1));
+ off = (loc << 1) & (u32)(sb->s_blocksize - 1);
+
+ fat_sector = fcache_getblk(sb, sec);
+ if (!fat_sector)
+ return -EIO;
+
+ fat_entry = (__le16 *)&(fat_sector[off]);
+ *fat_entry = cpu_to_le16(content);
+
+ return fcache_modify(sb, sec);
+}
+
+#define FATENT_FAT12_VALID_MASK (0x00000FFFU)
+/* Read the 12-bit FAT12 entry at @loc. Entries are packed 1.5 bytes
+ * apart (byte offset loc + loc/2); odd entries occupy the high 12 bits
+ * of the little-endian 16-bit word, even entries the low 12 bits. An
+ * entry may straddle a sector boundary, in which case both sectors are
+ * fetched. Returns 0 or -EIO. */
+static s32 fat12_ent_get(struct super_block *sb, u32 loc, u32 *content)
+{
+ u32 off, _content;
+ u64 sec;
+ u8 *fat_sector;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ sec = fsi->FAT1_start_sector + ((loc + (loc >> 1)) >> sb->s_blocksize_bits);
+ off = (loc + (loc >> 1)) & (u32)(sb->s_blocksize - 1);
+
+ fat_sector = fcache_getblk(sb, sec);
+ if (!fat_sector)
+ return -EIO;
+
+ if (off == (u32)(sb->s_blocksize - 1)) {
+ /* entry straddles a sector boundary: read both halves */
+ _content = (u32) fat_sector[off];
+
+ fat_sector = fcache_getblk(sb, ++sec);
+ if (!fat_sector)
+ return -EIO;
+
+ _content |= (u32) fat_sector[0] << 8;
+ } else {
+ _content = get_unaligned_le16(&fat_sector[off]);
+ }
+
+ /* odd entries live in the high 12 bits of the 16-bit word */
+ if (loc & 1)
+ _content >>= 4;
+
+ _content &= FATENT_FAT12_VALID_MASK;
+
+ /* remap reserved clusters to simplify code */
+ if (_content == CLUSTER_16(0x0FF7U))
+ _content = CLUS_BAD;
+ else if (_content >= CLUSTER_16(0x0FF8U))
+ _content = CLUS_EOF;
+
+ *content = CLUSTER_32(_content);
+ return 0;
+}
+
+/* Write the 12-bit FAT12 entry at @loc. A 12-bit entry shares a 16-bit
+ * little-endian word with its neighbor (odd entries in the high 12 bits,
+ * even entries in the low 12 bits), so the neighbor's nibble must be
+ * preserved. Entries may straddle a sector boundary, in which case both
+ * sectors are fetched and marked dirty.
+ * Returns 0 on success, -EIO on cache access failure.
+ */
+static s32 fat12_ent_set(struct super_block *sb, u32 loc, u32 content)
+{
+	u32 off;
+	u64 sec;
+	u8 *fat_sector, *fat_entry;
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+	content &= FATENT_FAT12_VALID_MASK;
+
+	sec = fsi->FAT1_start_sector + ((loc + (loc >> 1)) >> sb->s_blocksize_bits);
+	off = (loc + (loc >> 1)) & (u32)(sb->s_blocksize - 1);
+
+	fat_sector = fcache_getblk(sb, sec);
+	if (!fat_sector)
+		return -EIO;
+
+	if (loc & 1) { /* odd */
+
+		content <<= 4;
+
+		if (off == (u32)(sb->s_blocksize-1)) {
+			/* entry straddles a sector boundary */
+			fat_sector[off] = (u8)(content | (fat_sector[off] & 0x0F));
+			if (fcache_modify(sb, sec))
+				return -EIO;
+
+			fat_sector = fcache_getblk(sb, ++sec);
+			if (!fat_sector)
+				return -EIO;
+
+			fat_sector[0] = (u8)(content >> 8);
+		} else {
+			fat_entry = &(fat_sector[off]);
+			/* preserve the even neighbor's low nibble */
+			content |= 0x000F & get_unaligned_le16(fat_entry);
+			put_unaligned_le16(content, fat_entry);
+		}
+	} else { /* even */
+		/* low byte belongs wholly to this entry in both layouts */
+		fat_sector[off] = (u8)(content);
+
+		if (off == (u32)(sb->s_blocksize-1)) {
+			/* high nibble lives in the first byte of the next sector
+			 * (the redundant second store of fat_sector[off] that was
+			 * here has been removed: it was written just above)
+			 */
+			if (fcache_modify(sb, sec))
+				return -EIO;
+
+			fat_sector = fcache_getblk(sb, ++sec);
+			if (!fat_sector)
+				return -EIO;
+
+			fat_sector[0] = (u8)((fat_sector[0] & 0xF0) | (content >> 8));
+		} else {
+			fat_entry = &(fat_sector[off]);
+			/* preserve the odd neighbor's high nibble */
+			content |= 0xF000 & get_unaligned_le16(fat_entry);
+			put_unaligned_le16(content, fat_entry);
+		}
+	}
+	return fcache_modify(sb, sec);
+}
+
+
+/* FAT entry accessor tables, one per volume type; selected by
+ * fat_ent_ops_init(). Designated initializers make the binding explicit
+ * and robust against struct reordering. */
+static FATENT_OPS_T fat12_ent_ops = {
+	.ent_get = fat12_ent_get,
+	.ent_set = fat12_ent_set
+};
+
+static FATENT_OPS_T fat16_ent_ops = {
+	.ent_get = fat16_ent_get,
+	.ent_set = fat16_ent_set
+};
+
+static FATENT_OPS_T fat32_ent_ops = {
+	.ent_get = fat32_ent_get,
+	.ent_set = fat32_ent_set
+};
+
+static FATENT_OPS_T exfat_ent_ops = {
+	.ent_get = exfat_ent_get,
+	.ent_set = exfat_ent_set
+};
+
+/* Bind the FAT entry accessors matching the detected volume type.
+ * Returns 0 on success, -ENOTSUPP (and fatent_ops = NULL) for an
+ * unknown volume type. */
+s32 fat_ent_ops_init(struct super_block *sb)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ switch (fsi->vol_type) {
+ case EXFAT:
+ fsi->fatent_ops = &exfat_ent_ops;
+ break;
+ case FAT32:
+ fsi->fatent_ops = &fat32_ent_ops;
+ break;
+ case FAT16:
+ fsi->fatent_ops = &fat16_ent_ops;
+ break;
+ case FAT12:
+ fsi->fatent_ops = &fat12_ent_ops;
+ break;
+ default:
+ fsi->fatent_ops = NULL;
+ EMSG("Unknown volume type : %d", (int)fsi->vol_type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+/* FREE, EOF and BAD are reserved marker values, not real chain links */
+static inline bool is_reserved_clus(u32 clus)
+{
+	return IS_CLUS_FREE(clus) || IS_CLUS_EOF(clus) || IS_CLUS_BAD(clus);
+}
+
+/* Validated FAT read: rejects an out-of-range @loc, reads the entry via
+ * the volume-type accessor, and rejects contents that are neither a
+ * reserved marker nor a valid cluster. Each failure logs an fs error.
+ * Returns 0 on success, -EIO (or the accessor's error) on failure. */
+s32 fat_ent_get(struct super_block *sb, u32 loc, u32 *content)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ s32 err;
+
+ if (!is_valid_clus(fsi, loc)) {
+ sdfat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", loc);
+ return -EIO;
+ }
+
+ err = fsi->fatent_ops->ent_get(sb, loc, content);
+ if (err) {
+ sdfat_fs_error(sb, "failed to access to FAT "
+ "(entry 0x%08x, err:%d)", loc, err);
+ return err;
+ }
+
+ if (!is_reserved_clus(*content) && !is_valid_clus(fsi, *content)) {
+ sdfat_fs_error(sb, "invalid access to FAT (entry 0x%08x) "
+ "bogus content (0x%08x)", loc, *content);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Raw FAT write via the volume-type specific handler (no validation) */
+s32 fat_ent_set(struct super_block *sb, u32 loc, u32 content)
+{
+	FATENT_OPS_T *ops = SDFAT_SB(sb)->fsi.fatent_ops;
+
+	return ops->ent_set(sb, loc, content);
+}
+
+/* Like fat_ent_get(), but additionally rejects FREE and BAD contents,
+ * which are never legal while following a cluster chain */
+s32 fat_ent_get_safe(struct super_block *sb, u32 loc, u32 *content)
+{
+	s32 ret;
+
+	ret = fat_ent_get(sb, loc, content);
+	if (ret)
+		return ret;
+
+	if (IS_CLUS_FREE(*content)) {
+		sdfat_fs_error(sb, "invalid access to FAT free cluster "
+				"(entry 0x%08x)", loc);
+		ret = -EIO;
+	} else if (IS_CLUS_BAD(*content)) {
+		sdfat_fs_error(sb, "invalid access to FAT bad cluster "
+				"(entry 0x%08x)", loc);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+/* end of fatent.c */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * linux/fs/fat/misc.c
+ *
+ * Written 1992,1993 by Werner Almesberger
+ * 22/11/2000 - Fixed fat_date_unix2dos for dates earlier than 01/01/1980
+ * and date_dos2unix for date==0 by Igor Zhbanov(bsg@uniyar.ac.ru)
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : misc.c */
+/* PURPOSE : Helper function for checksum and handing sdFAT error */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/time.h>
+#include "sdfat.h"
+#include "version.h"
+
+#ifdef CONFIG_SDFAT_SUPPORT_STLOG
+#ifdef CONFIG_PROC_FSLOG
+#include <linux/fslog.h>
+#else
+#include <linux/stlog.h>
+#endif
+#else
+#define ST_LOG(fmt, ...)
+#endif
+
+/*************************************************************************
+ * FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
+ *************************************************************************/
+#ifdef CONFIG_SDFAT_UEVENT
+static struct kobject sdfat_uevent_kobj;
+
+/* Register the "uevent" kobject under @sdfat_kset so ro-remount events
+ * can be sent to userspace. Returns 0 on success, a negative errno
+ * from kobject_init_and_add() on failure. */
+int sdfat_uevent_init(struct kset *sdfat_kset)
+{
+ int err;
+ struct kobj_type *ktype = get_ktype(&sdfat_kset->kobj);
+
+ sdfat_uevent_kobj.kset = sdfat_kset;
+ err = kobject_init_and_add(&sdfat_uevent_kobj, ktype, NULL, "uevent");
+ if (err)
+ pr_err("[SDFAT] Unable to create sdfat uevent kobj\n");
+
+ return err;
+}
+
+/*
+ * Unregister the uevent kobject and zero it so a later
+ * sdfat_uevent_init() starts from a clean structure.
+ */
+void sdfat_uevent_uninit(void)
+{
+ kobject_del(&sdfat_uevent_kobj);
+ memset(&sdfat_uevent_kobj, 0, sizeof(struct kobject));
+}
+
+/*
+ * Notify userspace (via a KOBJ_CHANGE uevent carrying MAJOR/MINOR of the
+ * backing device) that the filesystem was forced read-only by an error.
+ */
+void sdfat_uevent_ro_remount(struct super_block *sb)
+{
+ struct block_device *bdev = sb->s_bdev;
+ dev_t bd_dev = bdev ? bdev->bd_dev : 0;
+
+ char major[16], minor[16];
+ char *envp[] = { major, minor, NULL };
+
+ /* Do not trigger uevent if a device has been ejected */
+ if (fsapi_check_bdi_valid(sb))
+ return;
+
+ snprintf(major, sizeof(major), "MAJOR=%d", MAJOR(bd_dev));
+ snprintf(minor, sizeof(minor), "MINOR=%d", MINOR(bd_dev));
+
+ kobject_uevent_env(&sdfat_uevent_kobj, KOBJ_CHANGE, envp);
+
+ ST_LOG("[SDFAT](%s[%d:%d]): Uevent triggered\n",
+ sb->s_id, MAJOR(bd_dev), MINOR(bd_dev));
+}
+#endif
+
+/*
+ * sdfat_fs_error reports a file system problem that might indicate a data
+ * corruption/inconsistency. Depending on 'errors' mount option the
+ * panic() is called, or the error message is printed and nothing is done,
+ * or filesystem is remounted read-only (default behavior).
+ * In case the file system is remounted read-only, it can be made writable
+ * again by remounting it.
+ *
+ * 'report' == 0 suppresses the log line (caller logs on its own).
+ */
+void __sdfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
+{
+ struct sdfat_mount_options *opts = &SDFAT_SB(sb)->options;
+ va_list args;
+ struct va_format vaf;
+ struct block_device *bdev = sb->s_bdev;
+ dev_t bd_dev = bdev ? bdev->bd_dev : 0;
+
+ if (report) {
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ pr_err("[SDFAT](%s[%d:%d]):ERR: %pV\n",
+ sb->s_id, MAJOR(bd_dev), MINOR(bd_dev), &vaf);
+#ifdef CONFIG_SDFAT_SUPPORT_STLOG
+ if (opts->errors == SDFAT_ERRORS_RO && !sb_rdonly(sb)) {
+ ST_LOG("[SDFAT](%s[%d:%d]):ERR: %pV\n",
+ sb->s_id, MAJOR(bd_dev), MINOR(bd_dev), &vaf);
+ }
+#endif
+ va_end(args);
+ }
+
+ if (opts->errors == SDFAT_ERRORS_PANIC) {
+ panic("[SDFAT](%s[%d:%d]): fs panic from previous error\n",
+ sb->s_id, MAJOR(bd_dev), MINOR(bd_dev));
+ } else if (opts->errors == SDFAT_ERRORS_RO && !sb_rdonly(sb)) {
+ sb->s_flags |= SB_RDONLY;
+ sdfat_statistics_set_mnt_ro();
+ pr_err("[SDFAT](%s[%d:%d]): Filesystem has been set "
+ "read-only\n", sb->s_id, MAJOR(bd_dev), MINOR(bd_dev));
+#ifdef CONFIG_SDFAT_SUPPORT_STLOG
+ ST_LOG("[SDFAT](%s[%d:%d]): Filesystem has been set read-only\n",
+ sb->s_id, MAJOR(bd_dev), MINOR(bd_dev));
+#endif
+ sdfat_uevent_ro_remount(sb);
+ }
+}
+EXPORT_SYMBOL(__sdfat_fs_error);
+
+/**
+ * __sdfat_msg() - print preformated SDFAT specific messages.
+ * All logs except what uses sdfat_fs_error() should be written by __sdfat_msg()
+ * If 'st' is set, the log is propagated to ST_LOG.
+ */
+void __sdfat_msg(struct super_block *sb, const char *level, int st, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ struct block_device *bdev = sb->s_bdev;
+ dev_t bd_dev = bdev ? bdev->bd_dev : 0;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ /* level means KERN_ facility level */
+ printk("%s[SDFAT](%s[%d:%d]): %pV\n", level,
+ sb->s_id, MAJOR(bd_dev), MINOR(bd_dev), &vaf);
+#ifdef CONFIG_SDFAT_SUPPORT_STLOG
+ if (st) {
+ ST_LOG("[SDFAT](%s[%d:%d]): %pV\n",
+ sb->s_id, MAJOR(bd_dev), MINOR(bd_dev), &vaf);
+ }
+#endif
+ va_end(args);
+}
+EXPORT_SYMBOL(__sdfat_msg);
+
+/* Log the driver version string once (kernel log and, if enabled, ST_LOG). */
+void sdfat_log_version(void)
+{
+ pr_info("[SDFAT] Filesystem version %s\n", SDFAT_VERSION);
+#ifdef CONFIG_SDFAT_SUPPORT_STLOG
+ ST_LOG("[SDFAT] Filesystem version %s\n", SDFAT_VERSION);
+#endif
+}
+EXPORT_SYMBOL(sdfat_log_version);
+
+/* <linux/time.h> externs sys_tz
+ * extern struct timezone sys_tz;
+ */
+/* seconds from the UNIX epoch (1970-01-01) to the FAT epoch (1980-01-01) */
+#define UNIX_SECS_1980 315532800L
+
+#if BITS_PER_LONG == 64
+/* first second of 2108 - one past the largest FAT-representable date */
+#define UNIX_SECS_2108 4354819200L
+#endif
+
+/* days between 1970/01/01 and 1980/01/01 (2 leap days) */
+#define DAYS_DELTA_DECADE (365 * 10 + 2)
+/* 120 (2100 - 1980) isn't leap year */
+#define NO_LEAP_YEAR_2100 (120)
+#define IS_LEAP_YEAR(y) (!((y) & 0x3) && (y) != NO_LEAP_YEAR_2100)
+
+#define SECS_PER_MIN (60)
+#define SECS_PER_HOUR (60 * SECS_PER_MIN)
+#define SECS_PER_DAY (24 * SECS_PER_HOUR)
+
+/* leap days elapsed from 1980 up to (not including) 'year' (years since 1980) */
+#define MAKE_LEAP_YEAR(leap_year, year) \
+ do { \
+ /* 2100 isn't leap year */ \
+ if (unlikely(year > NO_LEAP_YEAR_2100)) \
+ leap_year = ((year + 3) / 4) - 1; \
+ else \
+ leap_year = ((year + 3) / 4); \
+ } while (0)
+
+/* Linear day numbers of the respective 1sts in non-leap years. */
+/* NOTE(review): padded to 16 entries, presumably so an out-of-range month
+ * value cannot index past the array - confirm against callers. */
+static time_t accum_days_in_year[] = {
+ /* Month : N 01 02 03 04 05 06 07 08 09 10 11 12 */
+ 0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
+};
+
+/* on-disk timezone offset is counted in 15-minute units */
+#define TIMEZONE_SEC(x) ((x) * 15 * SECS_PER_MIN)
+/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
+/* tp->Year is years since 1980; DAYS_DELTA_DECADE bridges the 1970 epoch. */
+void sdfat_time_fat2unix(struct sdfat_sb_info *sbi, sdfat_timespec_t *ts,
+ DATE_TIME_T *tp)
+{
+ time_t year = tp->Year;
+ time_t ld; /* leap day */
+
+ MAKE_LEAP_YEAR(ld, year);
+
+ /* count this year's own leap day once Feb has passed */
+ if (IS_LEAP_YEAR(year) && (tp->Month) > 2)
+ ld++;
+
+ ts->tv_sec = tp->Second + tp->Minute * SECS_PER_MIN
+ + tp->Hour * SECS_PER_HOUR
+ + (year * 365 + ld + accum_days_in_year[tp->Month]
+ + (tp->Day - 1) + DAYS_DELTA_DECADE) * SECS_PER_DAY;
+
+ ts->tv_nsec = 0;
+
+ /* Treat as local time */
+ if (!sbi->options.tz_utc && !tp->Timezone.valid) {
+ ts->tv_sec += sys_tz.tz_minuteswest * SECS_PER_MIN;
+ return;
+ }
+
+ /* Treat as UTC time */
+ if (!tp->Timezone.valid)
+ return;
+
+ /* Treat as UTC time, but need to adjust timezone to UTC0 */
+ /* offsets 0x00-0x3F are east of UTC; 0x40-0x7F encode west-of-UTC */
+ if (tp->Timezone.off <= 0x3F)
+ ts->tv_sec -= TIMEZONE_SEC(tp->Timezone.off);
+ else /* 0x40 <= (tp->Timezone & 0x7F) <=0x7F */
+ ts->tv_sec += TIMEZONE_SEC(0x80 - tp->Timezone.off);
+}
+
+#define TIMEZONE_CUR_OFFSET() ((sys_tz.tz_minuteswest / (-15)) & 0x7F)
+/* Convert linear UNIX date to a FAT time/date pair. */
+/* Out-of-range inputs are clamped to the representable FAT window:
+ * 1980-01-01 00:00:00 ... 2107-12-31 23:59:59 (Year is offset from 1980). */
+void sdfat_time_unix2fat(struct sdfat_sb_info *sbi, sdfat_timespec_t *ts,
+ DATE_TIME_T *tp)
+{
+ /* only exFAT volumes record a timezone offset in the on-disk stamp */
+ bool tz_valid = (sbi->fsi.vol_type == EXFAT) ? true : false;
+ time_t second = ts->tv_sec;
+ time_t day, month, year;
+ time_t ld; /* leap day */
+
+ tp->Timezone.value = 0x00;
+
+ /* Treats as local time with proper time */
+ if (tz_valid || !sbi->options.tz_utc) {
+ second -= sys_tz.tz_minuteswest * SECS_PER_MIN;
+ if (tz_valid) {
+ tp->Timezone.valid = 1;
+ tp->Timezone.off = TIMEZONE_CUR_OFFSET();
+ }
+ }
+
+ /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */
+ if (second < UNIX_SECS_1980) {
+ tp->Second = 0;
+ tp->Minute = 0;
+ tp->Hour = 0;
+ tp->Day = 1;
+ tp->Month = 1;
+ tp->Year = 0;
+ return;
+ }
+#if (BITS_PER_LONG == 64)
+ if (second >= UNIX_SECS_2108) {
+ tp->Second = 59;
+ tp->Minute = 59;
+ tp->Hour = 23;
+ tp->Day = 31;
+ tp->Month = 12;
+ tp->Year = 127;
+ return;
+ }
+#endif
+
+ day = second / SECS_PER_DAY - DAYS_DELTA_DECADE;
+ year = day / 365;
+
+ /* initial year guess may overshoot; correct it using leap-day count */
+ MAKE_LEAP_YEAR(ld, year);
+ if (year * 365 + ld > day)
+ year--;
+
+ MAKE_LEAP_YEAR(ld, year);
+ day -= year * 365 + ld;
+
+ /* day 59 of a leap year is Feb 29th */
+ if (IS_LEAP_YEAR(year) && day == accum_days_in_year[3]) {
+ month = 2;
+ } else {
+ if (IS_LEAP_YEAR(year) && day > accum_days_in_year[3])
+ day--;
+ for (month = 1; month < 12; month++) {
+ if (accum_days_in_year[month + 1] > day)
+ break;
+ }
+ }
+ day -= accum_days_in_year[month];
+
+ tp->Second = second % SECS_PER_MIN;
+ tp->Minute = (second / SECS_PER_MIN) % 60;
+ tp->Hour = (second / SECS_PER_HOUR) % 24;
+ tp->Day = day + 1;
+ tp->Month = month;
+ tp->Year = year;
+}
+
+/*
+ * Fill 'tp' with the current time of 'inode', already converted to the
+ * FAT representation. Returns 'tp' for caller convenience.
+ */
+TIMESTAMP_T *tm_now(struct inode *inode, TIMESTAMP_T *tp)
+{
+ sdfat_timespec_t ts = current_time(inode);
+ DATE_TIME_T dt;
+
+ sdfat_time_unix2fat(SDFAT_SB(inode->i_sb), &ts, &dt);
+
+ tp->year = dt.Year;
+ tp->mon = dt.Month;
+ tp->day = dt.Day;
+ tp->hour = dt.Hour;
+ tp->min = dt.Minute;
+ tp->sec = dt.Second;
+ tp->tz.value = dt.Timezone.value;
+
+ return tp;
+}
+
+/*
+ * Rotate-right-by-one, byte-wide checksum over 'len' bytes of 'data'.
+ * 'chksum' seeds the accumulator so multiple buffers can be chained.
+ */
+u8 calc_chksum_1byte(void *data, s32 len, u8 chksum)
+{
+ const u8 *p = (const u8 *)data;
+ const u8 *end = p + len;
+
+ while (p < end) {
+ chksum = (((chksum & 1) << 7) | ((chksum & 0xFE) >> 1)) + *p;
+ p++;
+ }
+
+ return chksum;
+}
+
+/*
+ * Rotate-right-by-one, 16-bit checksum over 'len' bytes of 'data'.
+ * For CS_DIR_ENTRY the bytes at offsets 2 and 3 (the on-disk checksum
+ * field itself) are excluded; 'chksum' seeds the accumulator.
+ */
+u16 calc_chksum_2byte(void *data, s32 len, u16 chksum, s32 type)
+{
+ const u8 *bytes = (const u8 *)data;
+ s32 idx;
+
+ for (idx = 0; idx < len; idx++) {
+ if ((type == CS_DIR_ENTRY) && ((idx == 2) || (idx == 3)))
+ continue;
+ chksum = (((chksum & 1) << 15) | ((chksum & 0xFFFE) >> 1)) + (u16)bytes[idx];
+ }
+
+ return chksum;
+}
+
+#ifdef CONFIG_SDFAT_TRACE_ELAPSED_TIME
+struct timeval __t1, __t2;
+/* Current wall clock in microseconds, truncated to 32 bits (trace use only). */
+u32 sdfat_time_current_usec(struct timeval *tv)
+{
+ do_gettimeofday(tv);
+ return (u32)(tv->tv_sec*1000000 + tv->tv_usec);
+}
+#endif /* CONFIG_SDFAT_TRACE_ELAPSED_TIME */
+
+#ifdef CONFIG_SDFAT_DBG_CAREFUL
+/* Check the consistency of i_size_ondisk (FAT32, or flags 0x01 only) */
+/*
+ * Walk the inode's cluster chain and verify it matches i_size_ondisk.
+ * Chains with flags 0x03 (no FAT-chain representation) are skipped.
+ * BUGs if the chain ends early (EOF/FREE mid-chain) or runs long
+ * (no EOF where the size says it should end).
+ *
+ * Fixes vs. previous revision:
+ *  - 'tmp_i' was declared twice (redeclaration compile error); the second
+ *    declaration line now only declares 'num_clusters_org'.
+ *  - EMSG("%s: ...") lacked the argument for %s; pass __func__.
+ */
+void sdfat_debug_check_clusters(struct inode *inode)
+{
+ unsigned int num_clusters;
+ /* ring buffer of the last 50 visited clusters; kept volatile so the
+  * values survive into a ramdump for post-mortem inspection */
+ volatile uint32_t tmp_fat_chain[50];
+ volatile int tmp_i = 0;
+ volatile unsigned int num_clusters_org;
+ CHAIN_T clu;
+ FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
+ FS_INFO_T *fsi = &(SDFAT_SB(inode->i_sb)->fsi);
+
+ if (SDFAT_I(inode)->i_size_ondisk == 0)
+ num_clusters = 0;
+ else
+ num_clusters = ((SDFAT_I(inode)->i_size_ondisk-1) >> fsi->cluster_size_bits) + 1;
+
+ clu.dir = fid->start_clu;
+ clu.size = num_clusters;
+ clu.flags = fid->flags;
+
+ num_clusters_org = num_clusters;
+
+ if (clu.flags == 0x03)
+ return;
+
+ while (num_clusters > 0) {
+ /* FAT chain logging */
+ tmp_fat_chain[tmp_i] = clu.dir;
+ tmp_i++;
+ if (tmp_i >= 50)
+ tmp_i = 0;
+
+ BUG_ON(IS_CLUS_EOF(clu.dir) || IS_CLUS_FREE(clu.dir));
+
+ if (get_next_clus_safe(inode->i_sb, &(clu.dir)))
+ EMSG("%s: failed to access to FAT\n", __func__);
+
+ num_clusters--;
+ }
+
+ BUG_ON(!IS_CLUS_EOF(clu.dir));
+}
+
+#endif /* CONFIG_SDFAT_DBG_CAREFUL */
+
+#ifdef CONFIG_SDFAT_DBG_MSG
+/*
+ * Debug-build printk wrapper: drop messages above SDFAT_MSG_LEVEL and,
+ * when CONFIG_SDFAT_DBG_SHOW_PID is set, prefix each line with the
+ * current task's pid.
+ */
+void __sdfat_dmsg(int level, const char *fmt, ...)
+{
+#ifdef CONFIG_SDFAT_DBG_SHOW_PID
+ struct va_format vaf;
+ va_list args;
+
+ /* should check type */
+ if (level > SDFAT_MSG_LEVEL)
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ /* fmt already includes KERN_ facility level */
+ printk("[%u] %pV", current->pid, &vaf);
+ va_end(args);
+#else
+ va_list args;
+
+ /* should check type */
+ if (level > SDFAT_MSG_LEVEL)
+ return;
+
+ va_start(args, fmt);
+ /* fmt already includes KERN_ facility level */
+ vprintk(fmt, args);
+ va_end(args);
+#endif
+}
+#endif
+
--- /dev/null
+/*
+ * fs/mpage.c
+ *
+ * Copyright (C) 2002, Linus Torvalds.
+ *
+ * Contains functions related to preparing and submitting BIOs which contain
+ * multiple pagecache pages.
+ *
+ * 15May2002 Andrew Morton
+ * Initial version
+ * 27Jun2002 axboe@suse.de
+ * use bio_add_page() to build bio's just the right size
+ */
+
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : mpage.c */
+/* PURPOSE : sdFAT glue layer for supporting VFS */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
+#include <linux/mount.h>
+#include <linux/vfs.h>
+#include <linux/parser.h>
+#include <linux/uio.h>
+#include <linux/writeback.h>
+#include <linux/log2.h>
+#include <linux/hash.h>
+#include <linux/backing-dev.h>
+#include <linux/sched.h>
+#include <linux/fs_struct.h>
+#include <linux/namei.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/swap.h> /* for mark_page_accessed() */
+#include <asm/current.h>
+#include <asm/unaligned.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+#include <linux/aio.h>
+#endif
+
+#include "sdfat.h"
+
+#ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
+
+#define MIN_ALIGNED_SIZE (PAGE_SIZE)
+#define MIN_ALIGNED_SIZE_MASK (MIN_ALIGNED_SIZE - 1)
+
+/*************************************************************************
+ * INNER FUNCTIONS FOR FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
+ *************************************************************************/
+static void __mpage_write_end_io(struct bio *bio, int err);
+
+/*************************************************************************
+ * FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
+ *************************************************************************/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ /* EMPTY */
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) */
+/* Kernels before 4.14 have no bio_set_dev(); provide a minimal shim. */
+static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
+{
+ bio->bi_bdev = bdev;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+/* Drop any stale page-cache aliases of one block on the bdev mapping. */
+static inline void __sdfat_clean_bdev_aliases(struct block_device *bdev, sector_t block)
+{
+ clean_bdev_aliases(bdev, block, 1);
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) */
+/* Pre-4.10 name for the same operation. */
+static inline void __sdfat_clean_bdev_aliases(struct block_device *bdev, sector_t block)
+{
+ unmap_underlying_metadata(bdev, block);
+}
+
+/* Pre-4.10 kernels lack wbc_to_write_flags(); map WB_SYNC_ALL to WRITE_SYNC. */
+static inline int wbc_to_write_flags(struct writeback_control *wbc)
+{
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ return WRITE_SYNC;
+
+ return 0;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+/* Submit a WRITE bio; 4.8+ carries the op/flags on the bio itself. */
+static inline void __sdfat_submit_bio_write2(int flags, struct bio *bio)
+{
+ bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
+ submit_bio(bio);
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) */
+/* Older kernels pass the rw flags as submit_bio()'s first argument. */
+static inline void __sdfat_submit_bio_write2(int flags, struct bio *bio)
+{
+ submit_bio(WRITE | flags, bio);
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+/* bio_get_nr_vecs() was removed in 4.1; BIO_MAX_PAGES is the upper bound. */
+static inline int bio_get_nr_vecs(struct block_device *bdev)
+{
+ return BIO_MAX_PAGES;
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) */
+ /* EMPTY */
+#endif
+
+/* 3.14 moved the bio sector/size fields into bio->bi_iter; hide that here. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+static inline sector_t __sdfat_bio_sector(struct bio *bio)
+{
+ return bio->bi_iter.bi_sector;
+}
+
+static inline void __sdfat_set_bio_sector(struct bio *bio, sector_t sector)
+{
+ bio->bi_iter.bi_sector = sector;
+}
+
+static inline unsigned int __sdfat_bio_size(struct bio *bio)
+{
+ return bio->bi_iter.bi_size;
+}
+
+static inline void __sdfat_set_bio_size(struct bio *bio, unsigned int size)
+{
+ bio->bi_iter.bi_size = size;
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
+static inline sector_t __sdfat_bio_sector(struct bio *bio)
+{
+ return bio->bi_sector;
+}
+
+static inline void __sdfat_set_bio_sector(struct bio *bio, sector_t sector)
+{
+ bio->bi_sector = sector;
+}
+
+static inline unsigned int __sdfat_bio_size(struct bio *bio)
+{
+ return bio->bi_size;
+}
+
+static inline void __sdfat_set_bio_size(struct bio *bio, unsigned int size)
+{
+ bio->bi_size = size;
+}
+#endif
+
+/*************************************************************************
+ * MORE FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
+ *************************************************************************/
+/* Per-kernel-version bi_end_io adapters; all funnel into __mpage_write_end_io(). */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+static void mpage_write_end_io(struct bio *bio)
+{
+ __mpage_write_end_io(bio, blk_status_to_errno(bio->bi_status));
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+static void mpage_write_end_io(struct bio *bio)
+{
+ __mpage_write_end_io(bio, bio->bi_error);
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0) */
+static void mpage_write_end_io(struct bio *bio, int err)
+{
+ if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ err = 0;
+ __mpage_write_end_io(bio, err);
+}
+#endif
+
+/* __check_dfr_on() and __dfr_writepage_end_io() functions
+ * are copied from sdfat.c
+ * Each function should be same perfectly
+ */
+/*
+ * Return 1 when the byte range [start, end) of 'inode' is part of an
+ * in-progress defragmentation request, 0 otherwise (always 0 when
+ * CONFIG_SDFAT_DFR is off).
+ */
+static inline int __check_dfr_on(struct inode *inode, loff_t start, loff_t end, const char *fname)
+{
+#ifdef CONFIG_SDFAT_DFR
+ struct defrag_info *ino_dfr = &(SDFAT_I(inode)->dfr_info);
+
+ if ((atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ) &&
+ fsapi_dfr_check_dfr_on(inode, start, end, 0, fname))
+ return 1;
+#endif
+ return 0;
+}
+
+/*
+ * Defrag hook run at write completion for each page; no-op unless the
+ * owning inode has a pending defrag request (or CONFIG_SDFAT_DFR is off).
+ */
+static inline int __dfr_writepage_end_io(struct page *page)
+{
+#ifdef CONFIG_SDFAT_DFR
+ struct defrag_info *ino_dfr = &(SDFAT_I(page->mapping->host)->dfr_info);
+
+ if (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ)
+ fsapi_dfr_writepage_endio(page);
+#endif
+ return 0;
+}
+
+
+/*
+ * Decide the request-alignment unit (in sectors) for aligned mpage writes.
+ * Returns the queue's max_sectors when it is an exact power of two and at
+ * least MIN_ALIGNED_SIZE; returns 0 (alignment disabled) otherwise or when
+ * the disk/queue cannot be resolved.
+ */
+static inline unsigned int __calc_size_to_align(struct super_block *sb)
+{
+ struct block_device *bdev = sb->s_bdev;
+ struct gendisk *disk;
+ struct request_queue *queue;
+ struct queue_limits *limit;
+ unsigned int max_sectors;
+ unsigned int aligned = 0;
+
+ disk = bdev->bd_disk;
+ if (!disk)
+ goto out;
+
+ queue = disk->queue;
+ if (!queue)
+ goto out;
+
+ limit = &queue->limits;
+ max_sectors = limit->max_sectors;
+ aligned = 1 << ilog2(max_sectors);
+
+ /* reject non-power-of-two max_sectors */
+ if (aligned && (max_sectors & (aligned - 1)))
+ aligned = 0;
+
+ if (aligned && aligned < (MIN_ALIGNED_SIZE >> SECTOR_SIZE_BITS))
+ aligned = 0;
+out:
+ return aligned;
+}
+
+/* Per-writeback-pass state threaded through write_cache_pages(). */
+struct mpage_data {
+ struct bio *bio; /* bio being assembled across consecutive pages */
+ sector_t last_block_in_bio;
+ get_block_t *get_block;
+ unsigned int use_writepage; /* fall back to ->writepage on 'confused' */
+ unsigned int size_to_align; /* alignment unit in sectors, 0 = disabled */
+};
+
+/*
+ * After completing I/O on a page, call this routine to update the page
+ * flags appropriately
+ */
+static void __page_write_endio(struct page *page, int err)
+{
+ if (err) {
+ struct address_space *mapping;
+
+ SetPageError(page);
+ mapping = page_mapping(page);
+ if (mapping)
+ mapping_set_error(mapping, err);
+ }
+ __dfr_writepage_end_io(page);
+ end_page_writeback(page);
+}
+
+/*
+ * I/O completion handler for multipage BIOs.
+ *
+ * The mpage code never puts partial pages into a BIO (except for end-of-file).
+ * If a page does not map to a contiguous run of blocks then it simply falls
+ * back to block_read_full_page().
+ *
+ * Why is this? If a page's completion depends on a number of different BIOs
+ * which can complete in any order (or at the same time) then determining the
+ * status of that page is hard. See end_buffer_async_read() for the details.
+ * There is no point in duplicating all that complexity.
+ */
+static void __mpage_write_end_io(struct bio *bio, int err)
+{
+ struct bio_vec *bv;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
+ struct bvec_iter_all iter_all;
+
+ ASSERT(bio_data_dir(bio) == WRITE); /* only write */
+
+ /* Use bio_for_each_segment_all() to support multi-page bvec */
+ bio_for_each_segment_all(bv, bio, iter_all)
+ __page_write_endio(bv->bv_page, err);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)
+ struct bvec_iter_all iter_all;
+ int i;
+
+ ASSERT(bio_data_dir(bio) == WRITE); /* only write */
+
+ /* Use bio_for_each_segment_all() to support multi-page bvec */
+ bio_for_each_segment_all(bv, bio, i, iter_all)
+ __page_write_endio(bv->bv_page, err);
+#else
+ ASSERT(bio_data_dir(bio) == WRITE); /* only write */
+ /* walk the bvec array backwards, prefetching the next page's flags */
+ bv = bio->bi_io_vec + bio->bi_vcnt - 1;
+
+ do {
+ struct page *page = bv->bv_page;
+
+ if (--bv >= bio->bi_io_vec)
+ prefetchw(&bv->bv_page->flags);
+
+ __page_write_endio(page, err);
+ } while (bv >= bio->bi_io_vec);
+#endif
+ bio_put(bio);
+}
+
+/* Attach the completion handler, submit the bio, and return NULL so the
+ * caller can reset its cached bio pointer in one statement. */
+static struct bio *mpage_bio_submit_write(int flags, struct bio *bio)
+{
+ bio->bi_end_io = mpage_write_end_io;
+ __sdfat_submit_bio_write2(flags, bio);
+ return NULL;
+}
+
+/*
+ * Allocate a bio positioned at 'first_sector'. Under memory pressure
+ * (PF_MEMALLOC) retry with progressively fewer vecs rather than fail.
+ * Returns NULL only when even a 1-vec bio cannot be allocated.
+ */
+static struct bio *
+mpage_alloc(struct block_device *bdev,
+ sector_t first_sector, int nr_vecs,
+ gfp_t gfp_flags)
+{
+ struct bio *bio;
+
+ bio = bio_alloc(gfp_flags, nr_vecs);
+
+ if (bio == NULL && (current->flags & PF_MEMALLOC)) {
+ while (!bio && (nr_vecs /= 2))
+ bio = bio_alloc(gfp_flags, nr_vecs);
+ }
+
+ if (bio) {
+ bio_set_dev(bio, bdev);
+ __sdfat_set_bio_sector(bio, first_sector);
+ }
+ return bio;
+}
+
+
+/* Built-in builds can use the kernel's unexported helpers directly;
+ * module builds need local substitutes. */
+#if IS_BUILTIN(CONFIG_SDFAT_FS)
+#define __write_boundary_block write_boundary_block
+#define sdfat_buffer_heads_over_limit buffer_heads_over_limit
+#else
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+/*
+ * Called when we've recently written block `bblock', and it is known that
+ * `bblock' was for a buffer_boundary() buffer. This means that the block at
+ * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
+ * dirty, schedule it for IO. So that indirects merge nicely with their data.
+ */
+static void __write_boundary_block(struct block_device *bdev,
+ sector_t bblock, unsigned int blocksize)
+{
+ struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
+
+ if (bh) {
+ if (buffer_dirty(bh))
+ ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
+ put_bh(bh);
+ }
+}
+#else
+#warning "Need an alternative of write_boundary_block function"
+#define __write_boundary_block write_boundary_block
+#endif
+
+#warning "sdfat could not check buffer_heads_over_limit on module. Assumed zero"
+#define sdfat_buffer_heads_over_limit (0)
+#endif
+
+/*
+ * Clear the dirty bit on the first 'first_unmapped' buffers of 'page'
+ * (the ones this bio will write), leaving any trailing unmapped buffers
+ * untouched.
+ */
+static void clean_buffers(struct page *page, unsigned int first_unmapped)
+{
+ unsigned int buffer_counter = 0;
+ struct buffer_head *bh, *head;
+
+ if (!page_has_buffers(page))
+ return;
+ head = page_buffers(page);
+ bh = head;
+
+ do {
+ if (buffer_counter++ == first_unmapped)
+ break;
+ clear_buffer_dirty(bh);
+ bh = bh->b_this_page;
+ } while (bh != head);
+
+ /*
+ * we cannot drop the bh if the page is not uptodate or a concurrent
+ * readpage would fail to serialize with the bh and it would read from
+ * disk before we reach the platter.
+ */
+ if (sdfat_buffer_heads_over_limit && PageUptodate(page))
+ try_to_free_buffers(page);
+}
+
+/*
+ * Write one dirty page as (at most) a single contiguous run of blocks,
+ * merging it into the bio carried in mpage_data when possible. Whenever
+ * the page cannot be expressed that way (holes, non-contiguous blocks,
+ * clean/unmapped buffers, failed get_block) control jumps to 'confused',
+ * which submits any pending bio and falls back to ->writepage().
+ * Called for each page by write_cache_pages(); 'data' is struct mpage_data.
+ */
+static int sdfat_mpage_writepage(struct page *page,
+ struct writeback_control *wbc, void *data)
+{
+ struct mpage_data *mpd = data;
+ struct bio *bio = mpd->bio;
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = page->mapping->host;
+ const unsigned int blkbits = inode->i_blkbits;
+ const unsigned int blocks_per_page = PAGE_SIZE >> blkbits;
+ sector_t last_block;
+ sector_t block_in_file;
+ sector_t blocks[MAX_BUF_PER_PAGE];
+ unsigned int page_block;
+ unsigned int first_unmapped = blocks_per_page;
+ struct block_device *bdev = NULL;
+ int boundary = 0;
+ sector_t boundary_block = 0;
+ struct block_device *boundary_bdev = NULL;
+ int length;
+ struct buffer_head map_bh;
+ loff_t i_size = i_size_read(inode);
+ unsigned long end_index = i_size >> PAGE_SHIFT;
+ int ret = 0;
+ int op_flags = wbc_to_write_flags(wbc);
+
+ if (page_has_buffers(page)) {
+ struct buffer_head *head = page_buffers(page);
+ struct buffer_head *bh = head;
+
+ /* If they're all mapped and dirty, do it */
+ page_block = 0;
+ do {
+ BUG_ON(buffer_locked(bh));
+ if (!buffer_mapped(bh)) {
+ /*
+ * unmapped dirty buffers are created by
+ * __set_page_dirty_buffers -> mmapped data
+ */
+ if (buffer_dirty(bh))
+ goto confused;
+ if (first_unmapped == blocks_per_page)
+ first_unmapped = page_block;
+ continue;
+ }
+
+ if (first_unmapped != blocks_per_page)
+ goto confused; /* hole -> non-hole */
+
+ if (!buffer_dirty(bh) || !buffer_uptodate(bh))
+ goto confused;
+
+ /* bh should be mapped if delay is set */
+ if (buffer_delay(bh)) {
+ sector_t blk_in_file =
+ (sector_t)(page->index << (PAGE_SHIFT - blkbits)) + page_block;
+
+ BUG_ON(bh->b_size != (1 << blkbits));
+ if (page->index > end_index) {
+ MMSG("%s(inode:%p) "
+ "over end with delayed buffer"
+ "(page_idx:%u, end_idx:%u)\n",
+ __func__, inode,
+ (u32)page->index,
+ (u32)end_index);
+ goto confused;
+ }
+
+ /* resolve the delayed allocation now (create=1) */
+ ret = mpd->get_block(inode, blk_in_file, bh, 1);
+ if (ret) {
+ MMSG("%s(inode:%p) "
+ "failed to getblk(ret:%d)\n",
+ __func__, inode, ret);
+ goto confused;
+ }
+
+ BUG_ON(buffer_delay(bh));
+
+ if (buffer_new(bh)) {
+ clear_buffer_new(bh);
+ __sdfat_clean_bdev_aliases(bh->b_bdev, bh->b_blocknr);
+ }
+ }
+
+ /* blocks on the page must be physically consecutive */
+ if (page_block) {
+ if (bh->b_blocknr != blocks[page_block-1] + 1) {
+ MMSG("%s(inode:%p) pblk(%d) "
+ "no_seq(prev:%lld, new:%lld)\n",
+ __func__, inode, page_block,
+ (u64)blocks[page_block-1],
+ (u64)bh->b_blocknr);
+ goto confused;
+ }
+ }
+ blocks[page_block++] = bh->b_blocknr;
+ boundary = buffer_boundary(bh);
+ if (boundary) {
+ boundary_block = bh->b_blocknr;
+ boundary_bdev = bh->b_bdev;
+ }
+ bdev = bh->b_bdev;
+ } while ((bh = bh->b_this_page) != head);
+
+ if (first_unmapped)
+ goto page_is_mapped;
+
+ /*
+ * Page has buffers, but they are all unmapped. The page was
+ * created by pagein or read over a hole which was handled by
+ * block_read_full_page(). If this address_space is also
+ * using mpage_readpages then this can rarely happen.
+ */
+ goto confused;
+ }
+
+ /*
+ * The page has no buffers: map it to disk
+ */
+ BUG_ON(!PageUptodate(page));
+ block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
+ last_block = (i_size - 1) >> blkbits;
+ map_bh.b_page = page;
+ for (page_block = 0; page_block < blocks_per_page; ) {
+
+ map_bh.b_state = 0;
+ map_bh.b_size = 1 << blkbits;
+ if (mpd->get_block(inode, block_in_file, &map_bh, 1))
+ goto confused;
+
+ if (buffer_new(&map_bh))
+ __sdfat_clean_bdev_aliases(map_bh.b_bdev, map_bh.b_blocknr);
+ if (buffer_boundary(&map_bh)) {
+ boundary_block = map_bh.b_blocknr;
+ boundary_bdev = map_bh.b_bdev;
+ }
+
+ if (page_block) {
+ if (map_bh.b_blocknr != blocks[page_block-1] + 1)
+ goto confused;
+ }
+ blocks[page_block++] = map_bh.b_blocknr;
+ boundary = buffer_boundary(&map_bh);
+ bdev = map_bh.b_bdev;
+ if (block_in_file == last_block)
+ break;
+ block_in_file++;
+ }
+ BUG_ON(page_block == 0);
+
+ first_unmapped = page_block;
+
+page_is_mapped:
+ if (page->index >= end_index) {
+ /*
+ * The page straddles i_size. It must be zeroed out on each
+ * and every writepage invocation because it may be mmapped.
+ * "A file is mapped in multiples of the page size. For a file
+ * that is not a multiple of the page size, the remaining memory
+ * is zeroed when mapped, and writes to that region are not
+ * written out to the file."
+ */
+ unsigned int offset = i_size & (PAGE_SIZE - 1);
+
+ if (page->index > end_index || !offset) {
+ MMSG("%s(inode:%p) over end "
+ "(page_idx:%u, end_idx:%u off:%u)\n",
+ __func__, inode, (u32)page->index,
+ (u32)end_index, (u32)offset);
+ goto confused;
+ }
+ zero_user_segment(page, offset, PAGE_SIZE);
+ }
+
+ /*
+ * This page will go to BIO. Do we need to send this BIO off first?
+ *
+ * REMARK : added ELSE_IF for ALIGNMENT_MPAGE_WRITE of SDFAT
+ */
+ if (bio) {
+ if (mpd->last_block_in_bio != blocks[0] - 1) {
+ bio = mpage_bio_submit_write(op_flags, bio);
+ } else if (mpd->size_to_align) {
+ unsigned int mask = mpd->size_to_align - 1;
+ sector_t max_end_block =
+ (__sdfat_bio_sector(bio) & ~(mask)) + mask;
+
+ /* submit (REQ_NOMERGE) when the bio already ends on an
+ * alignment boundary but carries a partial-unit tail */
+ if ((__sdfat_bio_size(bio) & MIN_ALIGNED_SIZE_MASK) &&
+ (mpd->last_block_in_bio == max_end_block)) {
+ int op_nomerge = op_flags | REQ_NOMERGE;
+
+ MMSG("%s(inode:%p) alignment mpage_bio_submit"
+ "(start:%u, len:%u size:%u aligned:%u)\n",
+ __func__, inode,
+ (unsigned int)__sdfat_bio_sector(bio),
+ (unsigned int)(mpd->last_block_in_bio -
+ __sdfat_bio_sector(bio) + 1),
+ (unsigned int)__sdfat_bio_size(bio),
+ (unsigned int)mpd->size_to_align);
+ bio = mpage_bio_submit_write(op_nomerge, bio);
+ }
+ }
+ }
+
+alloc_new:
+ if (!bio) {
+ bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
+ bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
+ if (!bio)
+ goto confused;
+ }
+
+ /*
+ * Must try to add the page before marking the buffer clean or
+ * the confused fail path above (OOM) will be very confused when
+ * it finds all bh marked clean (i.e. it will not write anything)
+ */
+ length = first_unmapped << blkbits;
+ if (bio_add_page(bio, page, length, 0) < length) {
+ bio = mpage_bio_submit_write(op_flags, bio);
+ goto alloc_new;
+ }
+
+ /*
+ * OK, we have our BIO, so we can now mark the buffers clean. Make
+ * sure to only clean buffers which we know we'll be writing.
+ */
+ clean_buffers(page, first_unmapped);
+
+ BUG_ON(PageWriteback(page));
+ set_page_writeback(page);
+
+ /*
+ * FIXME FOR DEFRAGMENTATION : CODE REVIEW IS REQUIRED
+ *
+ * Turn off MAPPED flag in victim's bh if defrag on.
+ * Another write_begin can start after get_block for defrag victims
+ * called.
+ * In this case, write_begin calls get_block and get original block
+ * number and previous defrag will be canceled.
+ */
+ if (unlikely(__check_dfr_on(inode, (loff_t)(page->index << PAGE_SHIFT),
+ (loff_t)((page->index + 1) << PAGE_SHIFT), __func__))) {
+ struct buffer_head *head = page_buffers(page);
+ struct buffer_head *bh = head;
+
+ do {
+ clear_buffer_mapped(bh);
+ bh = bh->b_this_page;
+ } while (bh != head);
+ }
+
+ unlock_page(page);
+ if (boundary || (first_unmapped != blocks_per_page)) {
+ bio = mpage_bio_submit_write(op_flags, bio);
+ if (boundary_block) {
+ __write_boundary_block(boundary_bdev,
+ boundary_block, 1 << blkbits);
+ }
+ } else {
+ mpd->last_block_in_bio = blocks[blocks_per_page - 1];
+ }
+
+ goto out;
+
+confused:
+ if (bio)
+ bio = mpage_bio_submit_write(op_flags, bio);
+
+ if (mpd->use_writepage) {
+ ret = mapping->a_ops->writepage(page, wbc);
+ } else {
+ ret = -EAGAIN;
+ goto out;
+ }
+ /*
+ * The caller has a ref on the inode, so *mapping is stable
+ */
+ mapping_set_error(mapping, ret);
+out:
+ mpd->bio = bio;
+ return ret;
+}
+
+/*
+ * Entry point for aligned multipage writeback: iterate dirty pages via
+ * write_cache_pages() with sdfat_mpage_writepage(), then flush any
+ * still-pending bio. All submissions happen inside one blk plug.
+ */
+int sdfat_mpage_writepages(struct address_space *mapping,
+ struct writeback_control *wbc, get_block_t *get_block)
+{
+ struct blk_plug plug;
+ int ret;
+ struct mpage_data mpd = {
+ .bio = NULL,
+ .last_block_in_bio = 0,
+ .get_block = get_block,
+ .use_writepage = 1,
+ .size_to_align = __calc_size_to_align(mapping->host->i_sb),
+ };
+
+ BUG_ON(!get_block);
+ blk_start_plug(&plug);
+ ret = write_cache_pages(mapping, wbc, sdfat_mpage_writepage, &mpd);
+ if (mpd.bio) {
+ int op_flags = wbc_to_write_flags(wbc);
+
+ mpage_bio_submit_write(op_flags, mpd.bio);
+ }
+ blk_finish_plug(&plug);
+ return ret;
+}
+
+#endif /* CONFIG_SDFAT_ALIGNED_MPAGE_WRITE */
+
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : nls.c */
+/* PURPOSE : sdFAT NLS Manager */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+#include <linux/string.h>
+#include <linux/nls.h>
+
+#include "sdfat.h"
+#include "core.h"
+
+/*----------------------------------------------------------------------*/
+/* Global Variable Definitions */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+/* Local Variable Definitions */
+/*----------------------------------------------------------------------*/
+
+/* UTF-16 code points not allowed in DOS 8.3 short names. */
+static u16 bad_dos_chars[] = {
+ /* + , ; = [ ] */
+ 0x002B, 0x002C, 0x003B, 0x003D, 0x005B, 0x005D,
+ /* full-width (U+FFxx) forms of the same characters */
+ 0xFF0B, 0xFF0C, 0xFF1B, 0xFF1D, 0xFF3B, 0xFF3D,
+ 0
+};
+
+/*
+ * Allow full-width illegal characters :
+ * "MS windows 7" supports full-width-invalid-name-characters.
+ * So we should check half-width-invalid-name-characters(ASCII) only
+ * for compatibility.
+ *
+ * " * / : < > ? \ |
+ *
+ * patch 1.2.0
+ */
+static u16 bad_uni_chars[] = {
+ 0x0022, 0x002A, 0x002F, 0x003A,
+ 0x003C, 0x003E, 0x003F, 0x005C, 0x007C,
+#if 0 /* allow full-width characters */
+ 0x201C, 0x201D, 0xFF0A, 0xFF0F, 0xFF1A,
+ 0xFF1C, 0xFF1E, 0xFF1F, 0xFF3C, 0xFF5C,
+#endif
+ 0
+};
+
+/*----------------------------------------------------------------------*/
+/* Local Function Declarations */
+/*----------------------------------------------------------------------*/
+static s32 convert_uni_to_ch(struct nls_table *nls, u16 uni, u8 *ch, s32 *lossy);
+static s32 convert_ch_to_uni(struct nls_table *nls, u8 *ch, u16 *uni, s32 *lossy);
+
+/* Upcase a single UTF-16 code unit using the volume's upcase table.
+ * Case-sensitive volumes, and characters without a table column,
+ * are returned unchanged.
+ */
+static u16 nls_upper(struct super_block *sb, u16 a)
+{
+	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+	u16 *col;
+
+	if (SDFAT_SB(sb)->options.casesensitive)
+		return a;
+
+	col = (fsi->vol_utbl)[get_col_index(a)];
+	return col ? col[get_row_index(a)] : a;
+}
+/*======================================================================*/
+/* Global Function Definitions */
+/*======================================================================*/
+/* Scan a NUL-terminated u16 string for wchar.
+ *
+ * NOTE(review): unlike strchr(), on a match this returns the pointer
+ * *after* the matching character (str was already post-incremented),
+ * and returns 0 (NULL) when not found.  Every caller in this file only
+ * tests the result against NULL, so the skew is harmless here — but do
+ * not use the returned pointer positionally.
+ */
+u16 *nls_wstrchr(u16 *str, u16 wchar)
+{
+	while (*str) {
+		if (*(str++) == wchar)
+			return str;
+	}
+
+	return 0;
+}
+
+/* Compare two DOS 8.3 short names; returns 0 when identical. */
+s32 nls_cmp_sfn(struct super_block *sb, u8 *a, u8 *b)
+{
+	return strncmp((const char *)a, (const char *)b, DOS_NAME_LENGTH);
+}
+
+/* Compare two NUL-terminated u16 names, case-folded through the
+ * volume upcase table (no-op on case-sensitive mounts), bounded by
+ * MAX_NAME_LENGTH.  Returns 0 when equal, 1 otherwise.
+ */
+s32 nls_cmp_uniname(struct super_block *sb, u16 *a, u16 *b)
+{
+	s32 idx;
+
+	for (idx = 0; idx < MAX_NAME_LENGTH; idx++) {
+		u16 ca = *a++;
+		u16 cb = *b++;
+
+		if (nls_upper(sb, ca) != nls_upper(sb, cb))
+			return 1;
+		if (ca == 0x0)
+			return 0;
+	}
+	return 0;
+}
+
+/* name_case flag bits recorded in the short-name directory entry */
+#define CASE_LOWER_BASE (0x08) /* base is lower case */
+#define CASE_LOWER_EXT (0x10) /* extension is lower case */
+
+/* Build a DOS 8.3 short name from a UTF-16 name.
+ * On return p_dosname holds the space-padded 11-byte SFN and its
+ * name_case flags; *p_lossy (if non-NULL) reports NLS_NAME_LOSSY /
+ * NLS_NAME_OVERLEN when information was dropped.  Returns the number
+ * of SFN bytes produced.
+ */
+s32 nls_uni16s_to_sfn(struct super_block *sb, UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname, s32 *p_lossy)
+{
+	s32 i, j, len, lossy = NLS_NAME_NO_LOSSY;
+	u8 buf[MAX_CHARSET_SIZE];
+	u8 lower = 0, upper = 0;
+	u8 *dosname = p_dosname->name;
+	u16 *uniname = p_uniname->name;
+	u16 *p, *last_period;
+	struct nls_table *nls = SDFAT_SB(sb)->nls_disk;
+
+	/* DOSNAME is filled with space */
+	for (i = 0; i < DOS_NAME_LENGTH; i++)
+		*(dosname+i) = ' ';
+
+	/* DOT and DOTDOT are handled by VFS layer */
+
+	/* search for the last embedded period */
+	last_period = NULL;
+	for (p = uniname; *p; p++) {
+		if (*p == (u16) '.')
+			last_period = p;
+	}
+
+	/* i indexes the SFN: bytes 0-7 are the base, 8-10 the extension */
+	i = 0;
+	while (i < DOS_NAME_LENGTH) {
+		if (i == 8) {
+			/* base part full: jump to the extension source */
+			if (last_period == NULL)
+				break;
+
+			if (uniname <= last_period) {
+				/* base text remained unconsumed -> lossy */
+				if (uniname < last_period)
+					lossy |= NLS_NAME_OVERLEN;
+				uniname = last_period + 1;
+			}
+		}
+
+		if (*uniname == (u16) '\0') {
+			break;
+		} else if (*uniname == (u16) ' ') {
+			/* space is skipped, not representable in an SFN */
+			lossy |= NLS_NAME_LOSSY;
+		} else if (*uniname == (u16) '.') {
+			if (uniname < last_period)
+				lossy |= NLS_NAME_LOSSY;
+			else
+				i = 8;	/* final dot: switch to extension */
+		} else if (nls_wstrchr(bad_dos_chars, *uniname)) {
+			lossy |= NLS_NAME_LOSSY;
+			*(dosname+i) = '_';
+			i++;
+		} else {
+			len = convert_uni_to_ch(nls, *uniname, buf, &lossy);
+
+			if (len > 1) {
+				/* multi-byte char: must fit entirely in the
+				 * current part (base or extension)
+				 */
+				if ((i >= 8) && ((i+len) > DOS_NAME_LENGTH))
+					break;
+
+				if ((i < 8) && ((i+len) > 8)) {
+					i = 8;
+					continue;
+				}
+
+				/* force mixed-case marking below */
+				lower = 0xFF;
+
+				for (j = 0; j < len; j++, i++)
+					*(dosname+i) = *(buf+j);
+			} else { /* len == 1 */
+				if ((*buf >= 'a') && (*buf <= 'z')) {
+					*(dosname+i) = *buf - ('a' - 'A');
+
+					lower |= (i < 8) ?
+						CASE_LOWER_BASE :
+						CASE_LOWER_EXT;
+				} else if ((*buf >= 'A') && (*buf <= 'Z')) {
+					*(dosname+i) = *buf;
+
+					/* same bit positions reused to track
+					 * upper-case presence per part
+					 */
+					upper |= (i < 8) ?
+						CASE_LOWER_BASE :
+						CASE_LOWER_EXT;
+				} else {
+					*(dosname+i) = *buf;
+				}
+				i++;
+			}
+		}
+
+		uniname++;
+	}
+
+	/* 0xE5 marks a deleted entry on disk; escape it as 0x05 */
+	if (*dosname == 0xE5)
+		*dosname = 0x05;
+	if (*uniname != 0x0)
+		lossy |= NLS_NAME_OVERLEN;
+
+	/* a part containing both cases cannot be expressed by the
+	 * name_case flags -> 0xFF forces an LFN
+	 */
+	if (upper & lower)
+		p_dosname->name_case = 0xFF;
+	else
+		p_dosname->name_case = lower;
+
+	if (p_lossy)
+		*p_lossy = lossy;
+	return i;
+}
+
+/* Expand a DOS 8.3 short name back into a UTF-16 name.
+ * Honors the name_case flags (lower-casing base/extension), re-inserts
+ * the '.' separator, and un-escapes the 0x05 deleted-entry marker.
+ * Returns the number of UTF-16 code units produced.
+ */
+s32 nls_sfn_to_uni16s(struct super_block *sb, DOS_NAME_T *p_dosname, UNI_NAME_T *p_uniname)
+{
+	s32 i = 0, j, n = 0;
+	u8 buf[MAX_DOSNAME_BUF_SIZE];
+	u8 *dosname = p_dosname->name;
+	u16 *uniname = p_uniname->name;
+	struct nls_table *nls = SDFAT_SB(sb)->nls_disk;
+
+	/* 0x05 on disk stands for a leading 0xE5 byte */
+	if (*dosname == 0x05) {
+		*buf = 0xE5;
+		i++;
+		n++;
+	}
+
+	/* base part: bytes 0-7, space-padded */
+	for ( ; i < 8; i++, n++) {
+		if (*(dosname+i) == ' ')
+			break;
+
+		if ((*(dosname+i) >= 'A') && (*(dosname+i) <= 'Z') &&
+		    (p_dosname->name_case & CASE_LOWER_BASE))
+			*(buf+n) = *(dosname+i) + ('a' - 'A');
+		else
+			*(buf+n) = *(dosname+i);
+	}
+	/* non-blank extension -> re-insert the '.' separator */
+	if (*(dosname+8) != ' ') {
+		*(buf+n) = '.';
+		n++;
+	}
+
+	/* extension part: bytes 8-10, space-padded */
+	for (i = 8; i < DOS_NAME_LENGTH; i++, n++) {
+		if (*(dosname+i) == ' ')
+			break;
+
+		if ((*(dosname+i) >= 'A') && (*(dosname+i) <= 'Z') &&
+		    (p_dosname->name_case & CASE_LOWER_EXT))
+			*(buf+n) = *(dosname+i) + ('a' - 'A');
+		else
+			*(buf+n) = *(dosname+i);
+	}
+	*(buf+n) = '\0';
+
+	/* convert the assembled byte string to UTF-16, one (possibly
+	 * multi-byte) character at a time
+	 */
+	i = j = 0;
+	while (j < MAX_NAME_LENGTH) {
+		if (*(buf+i) == '\0')
+			break;
+
+		i += convert_ch_to_uni(nls, (buf+i), uniname, NULL);
+
+		uniname++;
+		j++;
+	}
+
+	*uniname = (u16) '\0';
+	return j;
+}
+
+/* Convert a UTF-16 name to a NUL-terminated UTF-8 string for the VFS.
+ * Returns the number of bytes written (excluding the terminator).
+ */
+static s32 __nls_utf16s_to_vfsname(struct super_block *sb, UNI_NAME_T *p_uniname, u8 *p_cstring, s32 buflen)
+{
+	s32 len;
+	const u16 *uniname = p_uniname->name;
+
+	/* Reserve one byte for the NUL terminator: utf16s_to_utf8s() may
+	 * use every byte it is offered, in which case the unconditional
+	 * p_cstring[len] store below would write one byte past the end
+	 * of the caller's buffer.  (always len >= 0)
+	 */
+	len = utf16s_to_utf8s(uniname, MAX_NAME_LENGTH, UTF16_HOST_ENDIAN,
+		p_cstring, buflen - 1);
+	p_cstring[len] = '\0';
+	return len;
+}
+
+/* Convert a UTF-8 VFS name to UTF-16 and fill p_uniname (name,
+ * name_len, name_hash).  *p_lossy (if non-NULL) is flagged when the
+ * name contains control or illegal characters.  Returns the UTF-16
+ * length, or a negative errno on conversion failure / overlong name.
+ */
+static s32 __nls_vfsname_to_utf16s(struct super_block *sb, const u8 *p_cstring,
+		const s32 len, UNI_NAME_T *p_uniname, s32 *p_lossy)
+{
+	s32 i, unilen, lossy = NLS_NAME_NO_LOSSY;
+	u16 upname[MAX_NAME_LENGTH+1];
+	u16 *uniname = p_uniname->name;
+
+	BUG_ON(!len);
+
+	/* NOTE(review): capacity MAX_NAME_LENGTH+2 assumes p_uniname->name
+	 * has at least that many slots — confirm against UNI_NAME_T.
+	 */
+	unilen = utf8s_to_utf16s(p_cstring, len, UTF16_HOST_ENDIAN,
+			(wchar_t *)uniname, MAX_NAME_LENGTH+2);
+	if (unilen < 0) {
+		MMSG("%s: failed to vfsname_to_utf16(err:%d) "
+			"vfsnamelen:%d", __func__, unilen, len);
+		return unilen;
+	}
+
+	if (unilen > MAX_NAME_LENGTH) {
+		MMSG("%s: failed to vfsname_to_utf16(estr:ENAMETOOLONG) "
+			"vfsnamelen:%d, unilen:%d>%d",
+			__func__, len, unilen, MAX_NAME_LENGTH);
+		return -ENAMETOOLONG;
+	}
+
+	/* (removed a dead store of name_len here; it was unconditionally
+	 * overwritten by the assignment after the loop)
+	 */
+	for (i = 0; i < unilen; i++) {
+		if ((*uniname < 0x0020) || nls_wstrchr(bad_uni_chars, *uniname))
+			lossy |= NLS_NAME_LOSSY;
+
+		/* case-folded copy feeds the directory-entry name hash */
+		*(upname+i) = nls_upper(sb, *uniname);
+		uniname++;
+	}
+
+	*uniname = (u16)'\0';
+	p_uniname->name_len = unilen;
+	p_uniname->name_hash = calc_chksum_2byte((void *) upname,
+				unilen << 1, 0, CS_DEFAULT);
+
+	if (p_lossy)
+		*p_lossy = lossy;
+
+	return unilen;
+}
+
+/* Convert a UTF-16 name to a NUL-terminated string in the mount's
+ * iocharset (non-UTF-8 path).  Output is truncated to fit buflen-1
+ * bytes; returns the number of bytes written.
+ */
+static s32 __nls_uni16s_to_vfsname(struct super_block *sb, UNI_NAME_T *p_uniname, u8 *p_cstring, s32 buflen)
+{
+	s32 i, j, len, out_len = 0;
+	u8 buf[MAX_CHARSET_SIZE];
+	const u16 *uniname = p_uniname->name;
+	struct nls_table *nls = SDFAT_SB(sb)->nls_io;
+
+	i = 0;
+	while ((i < MAX_NAME_LENGTH) && (out_len < (buflen-1))) {
+		if (*uniname == (u16)'\0')
+			break;
+
+		len = convert_uni_to_ch(nls, *uniname, buf, NULL);
+
+		/* clamp a multi-byte character that would overflow the
+		 * remaining space (only its leading bytes are emitted)
+		 */
+		if (out_len + len >= buflen)
+			len = (buflen - 1) - out_len;
+
+		out_len += len;
+
+		if (len > 1) {
+			for (j = 0; j < len; j++)
+				*p_cstring++ = (s8) *(buf+j);
+		} else { /* len == 1 */
+			*p_cstring++ = (s8) *buf;
+		}
+
+		uniname++;
+		i++;
+	}
+
+	*p_cstring = '\0';
+	return out_len;
+}
+
+/* Convert an iocharset-encoded VFS name to UTF-16 and fill p_uniname
+ * (name, name_len, name_hash).  *p_lossy (if non-NULL) reports
+ * NLS_NAME_LOSSY for illegal/unconvertible characters and
+ * NLS_NAME_OVERLEN when the input did not fit in MAX_NAME_LENGTH.
+ * Returns the UTF-16 length.
+ */
+static s32 __nls_vfsname_to_uni16s(struct super_block *sb, const u8 *p_cstring,
+		const s32 len, UNI_NAME_T *p_uniname, s32 *p_lossy)
+{
+	s32 i, unilen, lossy = NLS_NAME_NO_LOSSY;
+	u16 upname[MAX_NAME_LENGTH+1];
+	u16 *uniname = p_uniname->name;
+
+	BUG_ON(!len);
+
+	i = unilen = 0;
+	while ((unilen < MAX_NAME_LENGTH) && (i < len)) {
+		/* i advances by the number of input bytes consumed */
+		i += convert_ch_to_uni(nls, (u8 *)(p_cstring+i), uniname, &lossy);
+
+		if ((*uniname < 0x0020) || nls_wstrchr(bad_uni_chars, *uniname))
+			lossy |= NLS_NAME_LOSSY;
+
+		/* case-folded copy feeds the directory-entry name hash */
+		*(upname+unilen) = nls_upper(sb, *uniname);
+
+		uniname++;
+		unilen++;
+	}
+
+	/* input remaining after the loop means the name was too long */
+	if (*(p_cstring+i) != '\0')
+		lossy |= NLS_NAME_OVERLEN;
+
+	*uniname = (u16)'\0';
+	p_uniname->name_len = unilen;
+	p_uniname->name_hash =
+		calc_chksum_2byte((void *) upname, unilen<<1, 0, CS_DEFAULT);
+
+	if (p_lossy)
+		*p_lossy = lossy;
+
+	return unilen;
+}
+
+/* Dispatch UTF-16 -> VFS-name conversion by the mount's utf8 option. */
+s32 nls_uni16s_to_vfsname(struct super_block *sb, UNI_NAME_T *uniname, u8 *p_cstring, s32 buflen)
+{
+	return SDFAT_SB(sb)->options.utf8 ?
+		__nls_utf16s_to_vfsname(sb, uniname, p_cstring, buflen) :
+		__nls_uni16s_to_vfsname(sb, uniname, p_cstring, buflen);
+}
+
+/* Dispatch VFS-name -> UTF-16 conversion by the mount's utf8 option. */
+s32 nls_vfsname_to_uni16s(struct super_block *sb, const u8 *p_cstring, const s32 len, UNI_NAME_T *uniname, s32 *p_lossy)
+{
+	return SDFAT_SB(sb)->options.utf8 ?
+		__nls_vfsname_to_utf16s(sb, p_cstring, len, uniname, p_lossy) :
+		__nls_vfsname_to_uni16s(sb, p_cstring, len, uniname, p_lossy);
+}
+
+/*======================================================================*/
+/* Local Function Definitions */
+/*======================================================================*/
+
+/* Convert one (possibly multi-byte) character at *ch into a UTF-16
+ * code unit.  Returns the number of input bytes consumed; on failure
+ * substitutes '_', flags *lossy (if non-NULL), and guesses the
+ * consumed length: 1 for utf8 tables, 2 (double-byte) otherwise.
+ */
+static s32 convert_ch_to_uni(struct nls_table *nls, u8 *ch, u16 *uni, s32 *lossy)
+{
+	int n;
+
+	*uni = 0x0;
+
+	/* plain ASCII needs no table lookup */
+	if (ch[0] < 0x80) {
+		*uni = (u16) ch[0];
+		return 1;
+	}
+
+	n = nls->char2uni(ch, MAX_CHARSET_SIZE, uni);
+	if (n >= 0)
+		return n;
+
+	/* conversion failed */
+	DMSG("%s: fail to use nls\n", __func__);
+	if (lossy != NULL)
+		*lossy |= NLS_NAME_LOSSY;
+	*uni = (u16) '_';
+	return strcmp(nls->charset, "utf8") ? 2 : 1;
+} /* end of convert_ch_to_uni */
+
+/* Convert one UTF-16 code unit into its (possibly multi-byte)
+ * character encoding in ch[].  Returns the number of bytes produced;
+ * on failure emits a single '_' and flags *lossy (if non-NULL).
+ */
+static s32 convert_uni_to_ch(struct nls_table *nls, u16 uni, u8 *ch, s32 *lossy)
+{
+	int n;
+
+	ch[0] = 0x0;
+
+	/* plain ASCII maps to itself */
+	if (uni < 0x0080) {
+		ch[0] = (u8) uni;
+		return 1;
+	}
+
+	n = nls->uni2char(uni, ch, MAX_CHARSET_SIZE);
+	if (n >= 0)
+		return n;
+
+	/* conversion failed */
+	DMSG("%s: fail to use nls\n", __func__);
+	if (lossy != NULL)
+		*lossy |= NLS_NAME_LOSSY;
+	ch[0] = '_';
+	return 1;
+} /* end of convert_uni_to_ch */
+
+/* end of nls.c */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : core.c */
+/* PURPOSE : sdFAT glue layer for supporting VFS */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/pagemap.h>
+#include <linux/mpage.h>
+#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
+#include <linux/mount.h>
+#include <linux/vfs.h>
+#include <linux/parser.h>
+#include <linux/uio.h>
+#include <linux/writeback.h>
+#include <linux/log2.h>
+#include <linux/hash.h>
+#include <linux/backing-dev.h>
+#include <linux/sched.h>
+#include <linux/fs_struct.h>
+#include <linux/namei.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/swap.h> /* for mark_page_accessed() */
+#include <linux/vmalloc.h>
+#include <asm/current.h>
+#include <asm/unaligned.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
+#include <linux/iversion.h>
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+#include <linux/aio.h>
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#error SDFAT only supports linux kernel version 3.0 or higher
+#endif
+
+#include "sdfat.h"
+#include "version.h"
+
+/* skip iterating emit_dots when dir is empty */
+#define ITER_POS_FILLED_DOTS	(2)
+
+/* Human-readable names for the fs_type mount option; index order must
+ * match the type index declared at sdfat.h.
+ */
+const char *FS_TYPE_STR[] = {
+	"auto",
+	"exfat",
+	"vfat"
+};
+
+/* module-wide sysfs kset and inode slab cache (set up outside this view) */
+static struct kset *sdfat_kset;
+static struct kmem_cache *sdfat_inode_cachep;
+
+
+/* NLS defaults taken from Kconfig; sdfat_iocharset_with_utf8 is
+ * presumably the disk charset substituted when iocharset=utf8 — confirm
+ * against the mount-option parser (not in view).
+ */
+static int sdfat_default_codepage = CONFIG_SDFAT_DEFAULT_CODEPAGE;
+static char sdfat_default_iocharset[] = CONFIG_SDFAT_DEFAULT_IOCHARSET;
+static const char sdfat_iocharset_with_utf8[] = "iso8859-1";
+
+#ifdef CONFIG_SDFAT_TRACE_SB_LOCK
+/* jiffies at last superblock-lock acquisition, for lock tracing */
+static unsigned long __lock_jiffies;
+#endif
+
+static void sdfat_truncate(struct inode *inode, loff_t old_size);
+static int sdfat_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create);
+
+static struct inode *sdfat_iget(struct super_block *sb, loff_t i_pos);
+static struct inode *sdfat_build_inode(struct super_block *sb, const FILE_ID_T *fid, loff_t i_pos);
+static void sdfat_detach(struct inode *inode);
+static void sdfat_attach(struct inode *inode, loff_t i_pos);
+static inline unsigned long sdfat_hash(loff_t i_pos);
+static int __sdfat_write_inode(struct inode *inode, int sync);
+static int sdfat_sync_inode(struct inode *inode);
+static int sdfat_write_inode(struct inode *inode, struct writeback_control *wbc);
+static void sdfat_write_super(struct super_block *sb);
+static void sdfat_write_failed(struct address_space *mapping, loff_t to);
+
+static void sdfat_init_namebuf(DENTRY_NAMEBUF_T *nb);
+static int sdfat_alloc_namebuf(DENTRY_NAMEBUF_T *nb);
+static void sdfat_free_namebuf(DENTRY_NAMEBUF_T *nb);
+
+/*************************************************************************
+ * INNER FUNCTIONS FOR FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
+ *************************************************************************/
+static int __sdfat_getattr(struct inode *inode, struct kstat *stat);
+static void __sdfat_writepage_end_io(struct bio *bio, int err);
+static inline void __lock_super(struct super_block *sb);
+static inline void __unlock_super(struct super_block *sb);
+static int __sdfat_create(struct inode *dir, struct dentry *dentry);
+static int __sdfat_revalidate(struct dentry *dentry);
+static int __sdfat_revalidate_ci(struct dentry *dentry, unsigned int flags);
+static int __sdfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync);
+static struct dentry *__sdfat_lookup(struct inode *dir, struct dentry *dentry);
+static int __sdfat_mkdir(struct inode *dir, struct dentry *dentry);
+static int __sdfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+static int __sdfat_show_options(struct seq_file *m, struct super_block *sb);
+static inline ssize_t __sdfat_blkdev_direct_IO(int rw, struct kiocb *iocb,
+ struct inode *inode, void *iov_u, loff_t offset,
+ unsigned long nr_segs);
+static inline ssize_t __sdfat_direct_IO(int rw, struct kiocb *iocb,
+ struct inode *inode, void *iov_u, loff_t offset,
+ loff_t count, unsigned long nr_segs);
+static int __sdfat_d_hash(const struct dentry *dentry, struct qstr *qstr);
+static int __sdfat_d_hashi(const struct dentry *dentry, struct qstr *qstr);
+static int __sdfat_cmp(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name);
+static int __sdfat_cmpi(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name);
+
+/*************************************************************************
+ * FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
+ *************************************************************************/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
+/* Compat: pre-4.16 kernels expose i_version as a plain field instead
+ * of the <linux/iversion.h> accessors.
+ */
+static inline void inode_set_iversion(struct inode *inode, u64 val)
+{
+	inode->i_version = val;
+}
+static inline u64 inode_peek_iversion(struct inode *inode)
+{
+	return inode->i_version;
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+	/* EMPTY */
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) */
+/* Compat: bio_set_dev() helper did not exist before 4.14. */
+static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
+{
+	bio->bi_bdev = bdev;
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+/* 4.11+ getattr signature (struct path + mask/flags); both variants
+ * just resolve the inode and delegate to __sdfat_getattr().
+ */
+static int sdfat_getattr(const struct path *path, struct kstat *stat,
+			u32 request_mask, unsigned int query_flags)
+{
+	struct inode *inode = d_backing_inode(path->dentry);
+
+	return __sdfat_getattr(inode, stat);
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
+static int sdfat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+
+	return __sdfat_getattr(inode, stat);
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+/* Drop any stale buffer-cache alias for a single block. */
+static inline void __sdfat_clean_bdev_aliases(struct block_device *bdev, sector_t block)
+{
+	clean_bdev_aliases(bdev, block, 1);
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) */
+static inline void __sdfat_clean_bdev_aliases(struct block_device *bdev, sector_t block)
+{
+	unmap_underlying_metadata(bdev, block);
+}
+
+/* Compat: map writeback mode to write flags (provided by the kernel
+ * itself from 4.10 on).
+ */
+static inline int wbc_to_write_flags(struct writeback_control *wbc)
+{
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		return WRITE_SYNC;
+
+	return 0;
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+/* 4.9+ rename callback carries flags; reject everything but
+ * RENAME_NOREPLACE and delegate to __sdfat_rename().
+ */
+static int sdfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+		struct inode *new_dir, struct dentry *new_dentry,
+		unsigned int flags)
+{
+	/*
+	 * The VFS already checks for existence, so for local filesystems
+	 * the RENAME_NOREPLACE implementation is equivalent to plain rename.
+	 * Don't support any other flags
+	 */
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+	return __sdfat_rename(old_dir, old_dentry, new_dir, new_dentry);
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) */
+static int sdfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+		struct inode *new_dir, struct dentry *new_dentry)
+{
+	return __sdfat_rename(old_dir, old_dentry, new_dir, new_dentry);
+}
+
+/* Compat: setattr_prepare() replaced inode_change_ok() in 4.9. */
+static int setattr_prepare(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+
+	return inode_change_ok(inode, attr);
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+/* 4.8 moved the op/flags into the bio itself and changed the
+ * full_name_hash()/init_name_hash() signatures to take a dentry.
+ */
+static inline void __sdfat_submit_bio_write(struct bio *bio,
+		struct writeback_control *wbc)
+{
+	int write_flags = wbc_to_write_flags(wbc);
+
+	bio_set_op_attrs(bio, REQ_OP_WRITE, write_flags);
+	submit_bio(bio);
+}
+
+static inline unsigned int __sdfat_full_name_hash(const struct dentry *dentry, const char *name, unsigned int len)
+{
+	return full_name_hash(dentry, name, len);
+}
+
+static inline unsigned long __sdfat_init_name_hash(const struct dentry *dentry)
+{
+	return init_name_hash(dentry);
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) */
+static inline void __sdfat_submit_bio_write(struct bio *bio,
+		struct writeback_control *wbc)
+{
+	int write_flags = wbc_to_write_flags(wbc);
+
+	submit_bio(write_flags, bio);
+}
+
+static inline unsigned int __sdfat_full_name_hash(const struct dentry *unused, const char *name, unsigned int len)
+{
+	return full_name_hash(name, len);
+}
+
+static inline unsigned long __sdfat_init_name_hash(const struct dentry *unused)
+{
+	return init_name_hash();
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 21)
+	/* EMPTY */
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 21) */
+/* Compat: inode_lock()/inode_unlock() wrappers around i_mutex. */
+static inline void inode_lock(struct inode *inode)
+{
+	mutex_lock(&inode->i_mutex);
+}
+
+static inline void inode_unlock(struct inode *inode)
+{
+	mutex_unlock(&inode->i_mutex);
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+/* Sync on remount: 3.16+ requires the filesystem to call
+ * sync_filesystem() itself; earlier kernels did it in the VFS.
+ */
+static inline int sdfat_remount_syncfs(struct super_block *sb)
+{
+	sync_filesystem(sb);
+	return 0;
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) */
+static inline int sdfat_remount_syncfs(struct super_block *sb)
+{
+	/*
+	 * We don`t need to call sync_filesystem(sb),
+	 * Because VFS calls it.
+	 */
+	return 0;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+	/* EMPTY */
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) */
+/* Compat: truncate_inode_pages_final() appeared in 3.15. */
+static inline void truncate_inode_pages_final(struct address_space *mapping)
+{
+	truncate_inode_pages(mapping, 0);
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+/* 3.14+ keeps bio position in bio->bi_iter. */
+static inline sector_t __sdfat_bio_sector(struct bio *bio)
+{
+	return bio->bi_iter.bi_sector;
+}
+
+/* Reset a bio's iterator so it can be walked again. */
+static inline void __sdfat_set_bio_iterate(struct bio *bio, sector_t sector,
+		unsigned int size, unsigned int idx, unsigned int done)
+{
+	struct bvec_iter *iter = &(bio->bi_iter);
+
+	iter->bi_sector = sector;
+	iter->bi_size = size;
+	iter->bi_idx = idx;
+	iter->bi_bvec_done = done;
+}
+
+/* 3.14 dropped the 'to' argument from truncate_pagecache(). */
+static void __sdfat_truncate_pagecache(struct inode *inode,
+					loff_t to, loff_t newsize)
+{
+	truncate_pagecache(inode, newsize);
+}
+
+static int sdfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
+{
+	return __sdfat_d_hash(dentry, qstr);
+}
+
+static int sdfat_d_hashi(const struct dentry *dentry, struct qstr *qstr)
+{
+	return __sdfat_d_hashi(dentry, qstr);
+}
+
+//instead of sdfat_readdir
+/* ->iterate implementation (3.14+ dir_context API): emits dots for
+ * exFAT volumes and the root dir, then walks directory entries via
+ * fsapi_readdir(), resolving each entry's inode number from the inode
+ * hash (or iunique() when not cached).
+ */
+static int sdfat_iterate(struct file *filp, struct dir_context *ctx)
+{
+	struct inode *inode = filp->f_path.dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+	FS_INFO_T *fsi = &(sbi->fsi);
+	DIR_ENTRY_T de;
+	DENTRY_NAMEBUF_T *nb = &(de.NameBuf);
+	unsigned long inum;
+	loff_t cpos;
+	int err = 0, fake_offset = 0;
+
+	sdfat_init_namebuf(nb);
+	__lock_super(sb);
+
+	cpos = ctx->pos;
+	if ((fsi->vol_type == EXFAT) || (inode->i_ino == SDFAT_ROOT_INO)) {
+		if (!dir_emit_dots(filp, ctx))
+			goto out;
+		if (ctx->pos == ITER_POS_FILLED_DOTS) {
+			cpos = 0;
+			fake_offset = 1;
+		}
+	}
+	/* position must be dentry-aligned */
+	if (cpos & (DENTRY_SIZE - 1)) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	/* name buffer should be allocated before use */
+	err = sdfat_alloc_namebuf(nb);
+	if (err)
+		goto out;
+get_new:
+	SDFAT_I(inode)->fid.size = i_size_read(inode);
+	SDFAT_I(inode)->fid.rwoffset = cpos >> DENTRY_SIZE_BITS;
+
+	if (cpos >= SDFAT_I(inode)->fid.size)
+		goto end_of_dir;
+
+	err = fsapi_readdir(inode, &de);
+	if (err) {
+		// at least we tried to read a sector
+		// move cpos to next sector position (should be aligned)
+		if (err == -EIO) {
+			cpos += 1 << (sb->s_blocksize_bits);
+			/* NOTE(review): the u32 mask also clears the upper
+			 * 32 bits of cpos — looks unintended for dirs past
+			 * 4GiB of entries; confirm upstream.
+			 */
+			cpos &= ~((u32)sb->s_blocksize-1);
+		}
+
+		/* NOTE(review): overwrites any non-EIO error from
+		 * fsapi_readdir() with -EIO.
+		 */
+		err = -EIO;
+		goto end_of_dir;
+	}
+
+	cpos = SDFAT_I(inode)->fid.rwoffset << DENTRY_SIZE_BITS;
+
+	/* empty long-file-name means end of directory */
+	if (!nb->lfn[0])
+		goto end_of_dir;
+
+	if (!memcmp(nb->sfn, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) {
+		inum = inode->i_ino;
+	} else if (!memcmp(nb->sfn, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH)) {
+		inum = parent_ino(filp->f_path.dentry);
+	} else {
+		loff_t i_pos = ((loff_t) SDFAT_I(inode)->fid.start_clu << 32) |
+			((SDFAT_I(inode)->fid.rwoffset-1) & 0xffffffff);
+		struct inode *tmp = sdfat_iget(sb, i_pos);
+
+		if (tmp) {
+			inum = tmp->i_ino;
+			iput(tmp);
+		} else {
+			inum = iunique(sb, SDFAT_ROOT_INO);
+		}
+	}
+
+	/* Before calling dir_emit(), sb_lock should be released.
+	 * Because page fault can occur in dir_emit() when the size of buffer given
+	 * from user is larger than one page size
+	 */
+	__unlock_super(sb);
+	if (!dir_emit(ctx, nb->lfn, strlen(nb->lfn), inum,
+			(de.Attr & ATTR_SUBDIR) ? DT_DIR : DT_REG))
+		goto out_unlocked;
+	__lock_super(sb);
+
+	ctx->pos = cpos;
+	goto get_new;
+
+end_of_dir:
+	if (!cpos && fake_offset)
+		cpos = ITER_POS_FILLED_DOTS;
+	ctx->pos = cpos;
+out:
+	__unlock_super(sb);
+out_unlocked:
+	/*
+	 * To improve performance, free namebuf after unlock sb_lock.
+	 * If namebuf is not allocated, this function do nothing
+	 */
+	sdfat_free_namebuf(nb);
+	return err;
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
+/* Pre-3.14: bio position fields live directly on struct bio. */
+static inline sector_t __sdfat_bio_sector(struct bio *bio)
+{
+	return bio->bi_sector;
+}
+
+static inline void __sdfat_set_bio_iterate(struct bio *bio, sector_t sector,
+		unsigned int size, unsigned int idx, unsigned int done)
+{
+	bio->bi_sector = sector;
+	bio->bi_idx = idx;
+	bio->bi_size = size; //PAGE_SIZE;
+}
+
+/* Pre-3.14 truncate_pagecache() still takes the old size. */
+static void __sdfat_truncate_pagecache(struct inode *inode,
+					loff_t to, loff_t newsize)
+{
+	truncate_pagecache(inode, to, newsize);
+}
+
+/* Pre-3.14 d_hash callbacks carry an extra inode argument. */
+static int sdfat_d_hash(const struct dentry *dentry,
+		const struct inode *inode, struct qstr *qstr)
+{
+	return __sdfat_d_hash(dentry, qstr);
+}
+
+static int sdfat_d_hashi(const struct dentry *dentry,
+		const struct inode *inode, struct qstr *qstr)
+{
+	return __sdfat_d_hashi(dentry, qstr);
+}
+/* Pre-3.14 ->readdir implementation (filldir callback API); same
+ * structure as sdfat_iterate() above but fakes the "." and ".."
+ * entries by hand.
+ */
+static int sdfat_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_path.dentry->d_inode;
+	struct super_block *sb = inode->i_sb;
+	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+	FS_INFO_T *fsi = &(sbi->fsi);
+	DIR_ENTRY_T de;
+	DENTRY_NAMEBUF_T *nb = &(de.NameBuf);
+	unsigned long inum;
+	loff_t cpos;
+	int err = 0, fake_offset = 0;
+
+	sdfat_init_namebuf(nb);
+	__lock_super(sb);
+
+	cpos = filp->f_pos;
+	/* Fake . and .. for the root directory. */
+	if ((fsi->vol_type == EXFAT) || (inode->i_ino == SDFAT_ROOT_INO)) {
+		while (cpos < ITER_POS_FILLED_DOTS) {
+			if (inode->i_ino == SDFAT_ROOT_INO)
+				inum = SDFAT_ROOT_INO;
+			else if (cpos == 0)
+				inum = inode->i_ino;
+			else /* (cpos == 1) */
+				inum = parent_ino(filp->f_path.dentry);
+
+			/* name ".." with length cpos+1 yields "." for
+			 * cpos==0 and ".." for cpos==1
+			 */
+			if (filldir(dirent, "..", cpos+1, cpos, inum, DT_DIR) < 0)
+				goto out;
+			cpos++;
+			filp->f_pos++;
+		}
+		if (cpos == ITER_POS_FILLED_DOTS) {
+			cpos = 0;
+			fake_offset = 1;
+		}
+	}
+	/* position must be dentry-aligned */
+	if (cpos & (DENTRY_SIZE - 1)) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	/* name buffer should be allocated before use */
+	err = sdfat_alloc_namebuf(nb);
+	if (err)
+		goto out;
+get_new:
+	SDFAT_I(inode)->fid.size = i_size_read(inode);
+	SDFAT_I(inode)->fid.rwoffset = cpos >> DENTRY_SIZE_BITS;
+
+	if (cpos >= SDFAT_I(inode)->fid.size)
+		goto end_of_dir;
+
+	err = fsapi_readdir(inode, &de);
+	if (err) {
+		// at least we tried to read a sector
+		// move cpos to next sector position (should be aligned)
+		if (err == -EIO) {
+			cpos += 1 << (sb->s_blocksize_bits);
+			/* NOTE(review): u32 mask also clears the upper 32
+			 * bits of cpos — see the same pattern in
+			 * sdfat_iterate(); confirm upstream.
+			 */
+			cpos &= ~((u32)sb->s_blocksize-1);
+		}
+
+		/* NOTE(review): overwrites any non-EIO error with -EIO */
+		err = -EIO;
+		goto end_of_dir;
+	}
+
+	cpos = SDFAT_I(inode)->fid.rwoffset << DENTRY_SIZE_BITS;
+
+	/* empty long-file-name means end of directory */
+	if (!nb->lfn[0])
+		goto end_of_dir;
+
+	if (!memcmp(nb->sfn, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) {
+		inum = inode->i_ino;
+	} else if (!memcmp(nb->sfn, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH)) {
+		inum = parent_ino(filp->f_path.dentry);
+	} else {
+		loff_t i_pos = ((loff_t) SDFAT_I(inode)->fid.start_clu << 32) |
+			((SDFAT_I(inode)->fid.rwoffset-1) & 0xffffffff);
+		struct inode *tmp = sdfat_iget(sb, i_pos);
+
+		if (tmp) {
+			inum = tmp->i_ino;
+			iput(tmp);
+		} else {
+			inum = iunique(sb, SDFAT_ROOT_INO);
+		}
+	}
+
+	/* Before calling dir_emit(), sb_lock should be released.
+	 * Because page fault can occur in dir_emit() when the size of buffer given
+	 * from user is larger than one page size
+	 */
+	__unlock_super(sb);
+	if (filldir(dirent, nb->lfn, strlen(nb->lfn), cpos, inum,
+			(de.Attr & ATTR_SUBDIR) ? DT_DIR : DT_REG) < 0)
+		goto out_unlocked;
+	__lock_super(sb);
+
+	filp->f_pos = cpos;
+	goto get_new;
+
+end_of_dir:
+	if (!cpos && fake_offset)
+		cpos = ITER_POS_FILLED_DOTS;
+	filp->f_pos = cpos;
+out:
+	__unlock_super(sb);
+out_unlocked:
+	/*
+	 * To improve performance, free namebuf after unlock sb_lock.
+	 * If namebuf is not allocated, this function do nothing
+	 */
+	sdfat_free_namebuf(nb);
+	return err;
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+	/* EMPTY */
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) */
+/* Compat: file_inode() helper appeared in 3.9. */
+static inline struct inode *file_inode(const struct file *f)
+{
+	return f->f_dentry->d_inode;
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+/* 3.7 removed sb->s_dirt, so the dirty flag lives in sdfat_sb_info. */
+static inline int __is_sb_dirty(struct super_block *sb)
+{
+	return SDFAT_SB(sb)->s_dirt;
+}
+
+static inline void __set_sb_clean(struct super_block *sb)
+{
+	SDFAT_SB(sb)->s_dirt = 0;
+}
+
+/* Workqueue wrapper for sdfat_write_super () */
+static void __write_super_delayed(struct work_struct *work)
+{
+	struct sdfat_sb_info *sbi;
+	struct super_block *sb;
+
+	sbi = container_of(work, struct sdfat_sb_info, write_super_work.work);
+	sb = sbi->host_sb;
+
+	/* XXX: Is this needed? */
+	/* skip (rather than block) if the superblock is being unmounted */
+	if (!sb || !down_read_trylock(&sb->s_umount)) {
+		DMSG("%s: skip delayed work(write_super).\n", __func__);
+		return;
+	}
+
+	DMSG("%s: do delayed_work(write_super).\n", __func__);
+
+	/* clear the queued flag before writing so a new flush can be
+	 * scheduled while we run
+	 */
+	spin_lock(&sbi->work_lock);
+	sbi->write_super_queued = 0;
+	spin_unlock(&sbi->work_lock);
+
+	sdfat_write_super(sb);
+
+	up_read(&sb->s_umount);
+}
+
+/* Initialize the per-superblock lock and the delayed write_super work. */
+static void setup_sdfat_sync_super_wq(struct super_block *sb)
+{
+	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+	mutex_init(&sbi->s_lock);
+	spin_lock_init(&sbi->work_lock);
+	INIT_DELAYED_WORK(&sbi->write_super_work, __write_super_delayed);
+	sbi->host_sb = sb;
+}
+
+static inline bool __cancel_delayed_work_sync(struct sdfat_sb_info *sbi)
+{
+	return cancel_delayed_work_sync(&sbi->write_super_work);
+}
+
+/* 3.7 removed the VFS lock_super()/unlock_super(); use our own mutex. */
+static inline void lock_super(struct super_block *sb)
+{
+	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+	mutex_lock(&sbi->s_lock);
+}
+
+static inline void unlock_super(struct super_block *sb)
+{
+	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+	mutex_unlock(&sbi->s_lock);
+}
+
+/* d_revalidate callbacks (3.7+ flags-based API); RCU-walk is refused
+ * because the core revalidation may sleep.
+ */
+static int sdfat_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	return __sdfat_revalidate(dentry);
+}
+
+static int sdfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
+{
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	return __sdfat_revalidate_ci(dentry, flags);
+}
+
+/* Look up a cached inode by its on-disk position (i_pos) in the
+ * private inode hash table; returns a referenced inode or NULL.
+ */
+static struct inode *sdfat_iget(struct super_block *sb, loff_t i_pos)
+{
+	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+	struct sdfat_inode_info *info;
+	struct hlist_head *head = sbi->inode_hashtable + sdfat_hash(i_pos);
+	struct inode *inode = NULL;
+
+	spin_lock(&sbi->inode_hash_lock);
+	hlist_for_each_entry(info, head, i_hash_fat) {
+		BUG_ON(info->vfs_inode.i_sb != sb);
+
+		if (i_pos != info->i_pos)
+			continue;
+		/* igrab() fails for inodes being evicted; keep looking */
+		inode = igrab(&info->vfs_inode);
+		if (inode)
+			break;
+	}
+	spin_unlock(&sbi->inode_hash_lock);
+	return inode;
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0) */
+/* Pre-3.7: the VFS still owns sb->s_dirt and the periodic writeback,
+ * so no private delayed work is needed.
+ */
+static inline int __is_sb_dirty(struct super_block *sb)
+{
+	return sb->s_dirt;
+}
+
+static inline void __set_sb_clean(struct super_block *sb)
+{
+	sb->s_dirt = 0;
+}
+
+static void setup_sdfat_sync_super_wq(struct super_block *sb)
+{
+	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+	sbi->host_sb = sb;
+}
+
+static inline bool __cancel_delayed_work_sync(struct sdfat_sb_info *sbi)
+{
+	/* DO NOTHING */
+	return 0;
+}
+
+/* Compat: clear_inode() replaced end_writeback() in 3.5/3.7-era VFS. */
+static inline void clear_inode(struct inode *inode)
+{
+	end_writeback(inode);
+}
+
+/* Pre-3.7 d_revalidate callbacks take a nameidata (may be NULL). */
+static int sdfat_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	if (nd && nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	return __sdfat_revalidate(dentry);
+}
+
+static int sdfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
+{
+	if (nd && nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	return __sdfat_revalidate_ci(dentry, nd ? nd->flags : 0);
+
+}
+
+/* Pre-3.7 variant of sdfat_iget(): hlist_for_each_entry() still needed
+ * an explicit hlist_node cursor.
+ */
+static struct inode *sdfat_iget(struct super_block *sb, loff_t i_pos)
+{
+	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+	struct sdfat_inode_info *info;
+	struct hlist_node *node;
+	struct hlist_head *head = sbi->inode_hashtable + sdfat_hash(i_pos);
+	struct inode *inode = NULL;
+
+	spin_lock(&sbi->inode_hash_lock);
+	hlist_for_each_entry(info, node, head, i_hash_fat) {
+		BUG_ON(info->vfs_inode.i_sb != sb);
+
+		if (i_pos != info->i_pos)
+			continue;
+		inode = igrab(&info->vfs_inode);
+		if (inode)
+			break;
+	}
+	spin_unlock(&sbi->inode_hash_lock);
+	return inode;
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+/* ->lookup wrappers: 3.6 replaced the nameidata argument with flags;
+ * both delegate to __sdfat_lookup().
+ */
+static struct dentry *sdfat_lookup(struct inode *dir, struct dentry *dentry,
+		unsigned int flags)
+{
+	return __sdfat_lookup(dir, dentry);
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) */
+static struct dentry *sdfat_lookup(struct inode *dir, struct dentry *dentry,
+		struct nameidata *nd)
+{
+	return __sdfat_lookup(dir, dentry);
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+ /* NOTHING NOW */
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) */
+/*
+ * Pre-3.5 kernels have no kuid_t/kgid_t user-namespace mapping; model
+ * the newer API as identity functions on plain uid_t/gid_t so the rest
+ * of the driver can use the modern names unconditionally.
+ */
+#define GLOBAL_ROOT_UID (0)
+#define GLOBAL_ROOT_GID (0)
+
+static inline bool uid_eq(uid_t left, uid_t right)
+{
+ return left == right;
+}
+
+static inline bool gid_eq(gid_t left, gid_t right)
+{
+ return left == right;
+}
+
+/* Identity mapping: no user namespaces before 3.5. */
+static inline uid_t from_kuid_munged(struct user_namespace *to, uid_t kuid)
+{
+ return kuid;
+}
+
+static inline gid_t from_kgid_munged(struct user_namespace *to, gid_t kgid)
+{
+ return kgid;
+}
+
+static inline uid_t make_kuid(struct user_namespace *from, uid_t uid)
+{
+ return uid;
+}
+
+static inline gid_t make_kgid(struct user_namespace *from, gid_t gid)
+{
+ return gid;
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+/* 3.4+ root dentry helper: d_make_root() consumes the inode ref. */
+static struct dentry *__d_make_root(struct inode *root_inode)
+{
+ return d_make_root(root_inode);
+}
+
+/*
+ * Shrink an inode to @new bytes and release on-disk clusters beyond
+ * @old, serialized against bmap via the per-inode truncate_lock.
+ */
+static void __sdfat_do_truncate(struct inode *inode, loff_t old, loff_t new)
+{
+ down_write(&SDFAT_I(inode)->truncate_lock);
+ truncate_setsize(inode, new);
+ sdfat_truncate(inode, old);
+ up_write(&SDFAT_I(inode)->truncate_lock);
+}
+
+/* ->bmap: map a file block to a device sector, holding truncate_lock
+ * so the block cannot be truncated away mid-lookup.
+ */
+static sector_t sdfat_aop_bmap(struct address_space *mapping, sector_t block)
+{
+ sector_t blocknr;
+
+ /* sdfat_get_cluster() assumes the requested blocknr isn't truncated. */
+ down_read(&SDFAT_I(mapping->host)->truncate_lock);
+ blocknr = generic_block_bmap(mapping, block, sdfat_get_block);
+ up_read(&SDFAT_I(mapping->host)->truncate_lock);
+ return blocknr;
+}
+
+/* 3.4+ ->mkdir takes umode_t; mode is ignored by __sdfat_mkdir(). */
+static int sdfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ return __sdfat_mkdir(dir, dentry);
+}
+
+/* 3.4+ ->show_options receives the root dentry instead of a vfsmount. */
+static int sdfat_show_options(struct seq_file *m, struct dentry *root)
+{
+ return __sdfat_show_options(m, root->d_sb);
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) */
+/* Pre-3.4: i_nlink is still directly writable. */
+static inline void set_nlink(struct inode *inode, unsigned int nlink)
+{
+ inode->i_nlink = nlink;
+}
+
+static struct dentry *__d_make_root(struct inode *root_inode)
+{
+ return d_alloc_root(root_inode);
+}
+
+/* Pre-3.4 truncate: no per-inode truncate_lock exists yet. */
+static void __sdfat_do_truncate(struct inode *inode, loff_t old, loff_t new)
+{
+ truncate_setsize(inode, new);
+ sdfat_truncate(inode, old);
+}
+
+/* Pre-3.4 bmap: the VFS-provided i_alloc_sem plays the role of
+ * truncate_lock.
+ */
+static sector_t sdfat_aop_bmap(struct address_space *mapping, sector_t block)
+{
+ sector_t blocknr;
+
+ /* sdfat_get_cluster() assumes the requested blocknr isn't truncated. */
+ down_read(&mapping->host->i_alloc_sem);
+ blocknr = generic_block_bmap(mapping, block, sdfat_get_block);
+ up_read(&mapping->host->i_alloc_sem);
+ return blocknr;
+}
+
+static int sdfat_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ return __sdfat_mkdir(dir, dentry);
+}
+
+static int sdfat_show_options(struct seq_file *m, struct vfsmount *mnt)
+{
+ return __sdfat_show_options(m, mnt->mnt_sb);
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+/* 3.1+ fsync carries a byte range; map onto generic_file_fsync(). */
+#define __sdfat_generic_file_fsync(filp, start, end, datasync) \
+ generic_file_fsync(filp, start, end, datasync)
+
+static int sdfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
+{
+ return __sdfat_file_fsync(filp, start, end, datasync);
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
+/* Pre-3.1: no range arguments; the start/end macro parameters are
+ * accepted and dropped so callers can use one spelling.
+ */
+#define __sdfat_generic_file_fsync(filp, start, end, datasync) \
+ generic_file_fsync(filp, datasync)
+static int sdfat_file_fsync(struct file *filp, int datasync)
+{
+ return __sdfat_file_fsync(filp, 0, 0, datasync);
+}
+#endif
+
+/*************************************************************************
+ * MORE FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
+ *************************************************************************/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+/* 4.13+: bio status is a blk_status_t; convert to -errno for the core. */
+static void sdfat_writepage_end_io(struct bio *bio)
+{
+ __sdfat_writepage_end_io(bio, blk_status_to_errno(bio->bi_status));
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+/* 4.3+: bio carries a plain int bi_error. */
+static void sdfat_writepage_end_io(struct bio *bio)
+{
+ __sdfat_writepage_end_io(bio, bio->bi_error);
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) */
+/* Older kernels pass err alongside the bio; BIO_UPTODATE set means the
+ * I/O actually succeeded, so force err to 0 in that case.
+ */
+static void sdfat_writepage_end_io(struct bio *bio, int err)
+{
+ if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ err = 0;
+ __sdfat_writepage_end_io(bio, err);
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+/*
+ * ->d_compare adapters: the VFS signature changed twice (4.8 dropped
+ * the parent dentry, 3.14 dropped the inode arguments). All variants
+ * forward to the version-independent __sdfat_cmp()/__sdfat_cmpi().
+ */
+static int sdfat_cmp(const struct dentry *dentry,
+ unsigned int len, const char *str, const struct qstr *name)
+{
+ return __sdfat_cmp(dentry, len, str, name);
+}
+
+static int sdfat_cmpi(const struct dentry *dentry,
+ unsigned int len, const char *str, const struct qstr *name)
+{
+ return __sdfat_cmpi(dentry, len, str, name);
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+static int sdfat_cmp(const struct dentry *parent, const struct dentry *dentry,
+ unsigned int len, const char *str, const struct qstr *name)
+{
+ return __sdfat_cmp(dentry, len, str, name);
+}
+
+static int sdfat_cmpi(const struct dentry *parent, const struct dentry *dentry,
+ unsigned int len, const char *str, const struct qstr *name)
+{
+ return __sdfat_cmpi(dentry, len, str, name);
+}
+#else
+static int sdfat_cmp(const struct dentry *parent, const struct inode *pinode,
+ const struct dentry *dentry, const struct inode *inode,
+ unsigned int len, const char *str, const struct qstr *name)
+{
+ return __sdfat_cmp(dentry, len, str, name);
+}
+
+static int sdfat_cmpi(const struct dentry *parent, const struct inode *pinode,
+ const struct dentry *dentry, const struct inode *inode,
+ unsigned int len, const char *str, const struct qstr *name)
+{
+ return __sdfat_cmpi(dentry, len, str, name);
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+/*
+ * ->direct_IO adapters. The prototype evolved (4.7 dropped offset,
+ * 4.1 dropped rw, 3.16 introduced iov_iter); each variant normalizes
+ * to __sdfat_direct_IO(rw, iocb, inode, iov/iter, offset, count,
+ * nr_segs). The iter/iov pointer is smuggled as void* and re-cast in
+ * __sdfat_blkdev_direct_IO() below.
+ */
+static ssize_t sdfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
+ size_t count = iov_iter_count(iter);
+ int rw = iov_iter_rw(iter);
+ loff_t offset = iocb->ki_pos;
+
+ return __sdfat_direct_IO(rw, iocb, inode,
+ (void *)iter, offset, count, 0 /* UNUSED */);
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+static ssize_t sdfat_direct_IO(struct kiocb *iocb,
+ struct iov_iter *iter,
+ loff_t offset)
+{
+ struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
+ size_t count = iov_iter_count(iter);
+ int rw = iov_iter_rw(iter);
+
+ return __sdfat_direct_IO(rw, iocb, inode,
+ (void *)iter, offset, count, 0 /* UNUSED */);
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+static ssize_t sdfat_direct_IO(int rw, struct kiocb *iocb,
+ struct iov_iter *iter,
+ loff_t offset)
+{
+ struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
+ size_t count = iov_iter_count(iter);
+
+ return __sdfat_direct_IO(rw, iocb, inode,
+ (void *)iter, offset, count, 0 /* UNUSED */);
+}
+#else
+/* Pre-3.16: iovec array + nr_segs instead of an iov_iter. */
+static ssize_t sdfat_direct_IO(int rw, struct kiocb *iocb,
+ const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+{
+ struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
+ size_t count = iov_length(iov, nr_segs);
+
+ return __sdfat_direct_IO(rw, iocb, inode,
+ (void *)iov, offset, count, nr_segs);
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+/*
+ * Version adapters for blockdev_direct_IO(). The void *iov_u argument
+ * is the iov_iter (3.16+) or iovec array (older) forwarded untouched
+ * from sdfat_direct_IO(); unused parameters exist only to keep one
+ * call signature across kernel versions.
+ */
+static inline ssize_t __sdfat_blkdev_direct_IO(int unused, struct kiocb *iocb,
+ struct inode *inode, void *iov_u, loff_t unused_1,
+ unsigned long nr_segs)
+{
+ struct iov_iter *iter = (struct iov_iter *)iov_u;
+
+ return blockdev_direct_IO(iocb, inode, iter, sdfat_get_block);
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+static inline ssize_t __sdfat_blkdev_direct_IO(int unused, struct kiocb *iocb,
+ struct inode *inode, void *iov_u, loff_t offset,
+ unsigned long nr_segs)
+{
+ struct iov_iter *iter = (struct iov_iter *)iov_u;
+
+ return blockdev_direct_IO(iocb, inode, iter, offset, sdfat_get_block);
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+static inline ssize_t __sdfat_blkdev_direct_IO(int rw, struct kiocb *iocb,
+ struct inode *inode, void *iov_u, loff_t offset,
+ unsigned long nr_segs)
+{
+ struct iov_iter *iter = (struct iov_iter *)iov_u;
+
+ return blockdev_direct_IO(rw, iocb, inode, iter,
+ offset, sdfat_get_block);
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+static inline ssize_t __sdfat_blkdev_direct_IO(int rw, struct kiocb *iocb,
+ struct inode *inode, void *iov_u, loff_t offset,
+ unsigned long nr_segs)
+{
+ const struct iovec *iov = (const struct iovec *)iov_u;
+
+ return blockdev_direct_IO(rw, iocb, inode, iov,
+ offset, nr_segs, sdfat_get_block);
+}
+#else
+/* Pre-3.4: must also pass the bdev and an (unused) end_io callback. */
+static inline ssize_t __sdfat_blkdev_direct_IO(int rw, struct kiocb *iocb,
+ struct inode *inode, void *iov_u, loff_t offset,
+ unsigned long nr_segs)
+{
+ const struct iovec *iov = (const struct iovec *)iov_u;
+
+ return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+ offset, nr_segs, sdfat_get_block, NULL);
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+/*
+ * Symlink target resolution: the link body is kept pre-decoded in
+ * sdfat_inode_info->target, so every variant just hands that buffer
+ * to the VFS (4.5 get_link-style, 4.2 cookie-style, older nd_set_link).
+ */
+static const char *sdfat_follow_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done)
+{
+ struct sdfat_inode_info *ei = SDFAT_I(inode);
+
+ return (char *)(ei->target);
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
+static const char *sdfat_follow_link(struct dentry *dentry, void **cookie)
+{
+ struct sdfat_inode_info *ei = SDFAT_I(dentry->d_inode);
+
+ return *cookie = (char *)(ei->target);
+}
+#else
+static void *sdfat_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct sdfat_inode_info *ei = SDFAT_I(dentry->d_inode);
+
+ nd_set_link(nd, (char *)(ei->target));
+ return NULL;
+}
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+/* ->create adapters: mode/excl/nameidata are ignored; all variants
+ * delegate to the version-independent __sdfat_create().
+ */
+static int sdfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool excl)
+{
+ return __sdfat_create(dir, dentry);
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+static int sdfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ struct nameidata *nd)
+{
+ return __sdfat_create(dir, dentry);
+}
+#else
+static int sdfat_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *nd)
+{
+ return __sdfat_create(dir, dentry);
+}
+#endif
+
+
+/*************************************************************************
+ * WRAP FUNCTIONS FOR DEBUGGING
+ *************************************************************************/
+#ifdef CONFIG_SDFAT_TRACE_SB_LOCK
+/*
+ * Tracing wrappers around lock_super()/unlock_super(): record the
+ * acquisition time in the file-scope __lock_jiffies and warn when the
+ * superblock lock was held longer than 10 ms.
+ */
+static inline void __lock_super(struct super_block *sb)
+{
+ lock_super(sb);
+ __lock_jiffies = jiffies;
+}
+
+static inline void __unlock_super(struct super_block *sb)
+{
+ int time = ((jiffies - __lock_jiffies) * 1000 / HZ);
+ /* FIXME : error message should be modified */
+ if (time > 10)
+ EMSG("lock_super in %s (%d ms)\n", __func__, time);
+
+ unlock_super(sb);
+}
+#else /* CONFIG_SDFAT_TRACE_SB_LOCK */
+/* Plain pass-through wrappers when lock tracing is disabled. */
+static inline void __lock_super(struct super_block *sb)
+{
+ lock_super(sb);
+}
+
+static inline void __unlock_super(struct super_block *sb)
+{
+ unlock_super(sb);
+}
+#endif /* CONFIG_SDFAT_TRACE_SB_LOCK */
+
+/*************************************************************************
+ * NORMAL FUNCTIONS
+ *************************************************************************/
+/*
+ * Pack a directory entry location into a single 64-bit inode position
+ * key: the directory cluster in the high 32 bits, the entry index in
+ * the low 32 bits. This is the key consumed by sdfat_iget()/sdfat_hash().
+ */
+static inline loff_t sdfat_make_i_pos(FILE_ID_T *fid)
+{
+ loff_t hi = (loff_t) fid->dir.dir << 32;
+ loff_t lo = fid->entry & 0xffffffff;
+
+ return hi | lo;
+}
+
+/*======================================================================*/
+/* Directory Entry Name Buffer Operations */
+/*======================================================================*/
+/* Reset a directory-entry name buffer to the empty state (no
+ * allocation; buffers are obtained later by sdfat_alloc_namebuf()).
+ */
+static void sdfat_init_namebuf(DENTRY_NAMEBUF_T *nb)
+{
+ nb->lfn = NULL;
+ nb->sfn = NULL;
+ nb->lfnbuf_len = 0;
+ nb->sfnbuf_len = 0;
+}
+
+/*
+ * Back a name buffer with one __getname() allocation, split into a
+ * long-filename half and a short-filename half of
+ * MAX_VFSNAME_BUF_SIZE bytes each. Returns 0 or -ENOMEM; release
+ * with sdfat_free_namebuf().
+ */
+static int sdfat_alloc_namebuf(DENTRY_NAMEBUF_T *nb)
+{
+ nb->lfn = __getname();
+ if (!nb->lfn)
+ return -ENOMEM;
+ nb->sfn = nb->lfn + MAX_VFSNAME_BUF_SIZE;
+ nb->lfnbuf_len = MAX_VFSNAME_BUF_SIZE;
+ nb->sfnbuf_len = MAX_VFSNAME_BUF_SIZE;
+ return 0;
+}
+
+/*
+ * Return the buffer obtained by sdfat_alloc_namebuf() to the names
+ * cache and reset @nb. Calling this on an unallocated buffer is a
+ * safe no-op.
+ */
+static void sdfat_free_namebuf(DENTRY_NAMEBUF_T *nb)
+{
+ if (nb->lfn) {
+ __putname(nb->lfn);
+ sdfat_init_namebuf(nb);
+ }
+}
+
+/*======================================================================*/
+/* Directory Entry Operations */
+/*======================================================================*/
+/* Sentinel values stored in dentry->d_fsdata while a directory
+ * operation is in flight, so d_revalidate can tell "being modified"
+ * apart from "stable".
+ */
+#define SDFAT_DSTATE_LOCKED (void *)(0xCAFE2016)
+#define SDFAT_DSTATE_UNLOCKED (void *)(0x00000000)
+
+/* Mark @dentry as under modification (checked by
+ * __check_dstate_locked() from the revalidate path).
+ */
+static inline void __lock_d_revalidate(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ dentry->d_fsdata = SDFAT_DSTATE_LOCKED;
+ spin_unlock(&dentry->d_lock);
+}
+
+/* Clear the under-modification mark set by __lock_d_revalidate(). */
+static inline void __unlock_d_revalidate(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ dentry->d_fsdata = SDFAT_DSTATE_UNLOCKED;
+ spin_unlock(&dentry->d_lock);
+}
+
+/*
+ * Caller must hold dentry->d_lock. Returns 1 while the dentry is
+ * marked under modification by __lock_d_revalidate(), 0 otherwise.
+ */
+static inline int __check_dstate_locked(struct dentry *dentry)
+{
+ return (dentry->d_fsdata == SDFAT_DSTATE_LOCKED) ? 1 : 0;
+}
+
+/*
+ * If new entry was created in the parent, it could create the 8.3
+ * alias (the shortname of logname). So, the parent may have the
+ * negative-dentry which matches the created 8.3 alias.
+ *
+ * If it happened, the negative dentry isn't actually negative
+ * anymore. So, drop it.
+ */
+static int __sdfat_revalidate_common(struct dentry *dentry)
+{
+ int ret = 1;
+
+ spin_lock(&dentry->d_lock);
+ /* Invalidate (ret = 0) a negative dentry whose cached parent
+ * version (d_time) no longer matches the parent's i_version and
+ * which is not currently locked by a directory operation.
+ */
+ if ((!dentry->d_inode) && (!__check_dstate_locked(dentry) &&
+ (dentry->d_time !=
+ (unsigned long)inode_peek_iversion(dentry->d_parent->d_inode)))) {
+ ret = 0;
+ }
+ spin_unlock(&dentry->d_lock);
+ return ret;
+}
+
+/* Case-sensitive d_revalidate core: positive dentries are always
+ * valid; negative ones are checked against the parent's version.
+ */
+static int __sdfat_revalidate(struct dentry *dentry)
+{
+ /* This is not negative dentry. Always valid. */
+ if (dentry->d_inode)
+ return 1;
+ return __sdfat_revalidate_common(dentry);
+}
+
+/* Case-insensitive d_revalidate core: like __sdfat_revalidate(), but
+ * additionally drops negative dentries on create/rename-target lookups
+ * so the user's exact-case name is the one that gets created.
+ */
+static int __sdfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
+{
+ /*
+ * This is not negative dentry. Always valid.
+ *
+ * Note, rename() to existing directory entry will have ->d_inode,
+ * and will use existing name which isn't specified name by user.
+ *
+ * We may be able to drop this positive dentry here. But dropping
+ * positive dentry isn't good idea. So it's unsupported like
+ * rename("filename", "FILENAME") for now.
+ */
+ if (dentry->d_inode)
+ return 1;
+#if 0 /* Blocked below code for lookup_one_len() called by stackable FS */
+ /*
+ * This may be nfsd (or something), anyway, we can't see the
+ * intent of this. So, since this can be for creation, drop it.
+ */
+ if (!flags)
+ return 0;
+#endif
+ /*
+ * Drop the negative dentry, in order to make sure to use the
+ * case sensitive name which is specified by user if this is
+ * for creation.
+ */
+ if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
+ return 0;
+ return __sdfat_revalidate_common(dentry);
+}
+
+
+/* returns the length of a struct qstr, ignoring trailing dots */
+static unsigned int __sdfat_striptail_len(unsigned int len, const char *name)
+{
+ while (len && name[len - 1] == '.')
+ len--;
+ return len;
+}
+
+/* qstr convenience wrapper around __sdfat_striptail_len(). */
+static unsigned int sdfat_striptail_len(const struct qstr *qstr)
+{
+ return __sdfat_striptail_len(qstr->len, qstr->name);
+}
+
+/*
+ * Compute the hash for the sdfat name corresponding to the dentry.
+ * Note: if the name is invalid, we leave the hash code unchanged so
+ * that the existing dentry can be used. The sdfat fs routines will
+ * return ENOENT or EINVAL as appropriate.
+ */
+static int __sdfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
+{
+ /* Case-sensitive hash over the name with trailing dots stripped,
+ * so "foo" and "foo." land in the same hash bucket.
+ */
+ unsigned int len = sdfat_striptail_len(qstr);
+
+ qstr->hash = __sdfat_full_name_hash(dentry, qstr->name, len);
+ return 0;
+}
+
+/*
+ * Compute the hash for the sdfat name corresponding to the dentry.
+ * Note: if the name is invalid, we leave the hash code unchanged so
+ * that the existing dentry can be used. The sdfat fs routines will
+ * return ENOENT or EINVAL as appropriate.
+ */
+static int __sdfat_d_hashi(const struct dentry *dentry, struct qstr *qstr)
+{
+ /* Case-insensitive variant: lower-case each byte through the
+ * mount's io charset table before folding it into the hash, so
+ * names differing only in case collide as intended.
+ */
+ struct nls_table *t = SDFAT_SB(dentry->d_sb)->nls_io;
+ const unsigned char *name;
+ unsigned int len;
+ unsigned long hash;
+
+ name = qstr->name;
+ len = sdfat_striptail_len(qstr);
+
+ hash = __sdfat_init_name_hash(dentry);
+ while (len--)
+ hash = partial_name_hash(nls_tolower(t, *name++), hash);
+ qstr->hash = end_name_hash(hash);
+
+ return 0;
+}
+
+/*
+ * Case sensitive compare of two sdfat names, ignoring trailing dots
+ * on both sides. Returns 0 on match, 1 otherwise (d_compare contract).
+ */
+static int __sdfat_cmp(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name)
+{
+ unsigned int qlen, slen;
+
+ /* A filename cannot end in '.' or we treat it like it has none */
+ qlen = sdfat_striptail_len(name);
+ slen = __sdfat_striptail_len(len, str);
+
+ if (qlen != slen)
+ return 1;
+
+ return strncmp(name->name, str, qlen) ? 1 : 0;
+}
+
+/*
+ * Case insensitive compare of two sdfat names, ignoring trailing dots
+ * on both sides and folding case through the mount's io charset table.
+ * Returns 0 on match, 1 otherwise (d_compare contract).
+ */
+static int __sdfat_cmpi(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name)
+{
+ struct nls_table *t = SDFAT_SB(dentry->d_sb)->nls_io;
+ unsigned int qlen, slen;
+
+ /* A filename cannot end in '.' or we treat it like it has none */
+ qlen = sdfat_striptail_len(name);
+ slen = __sdfat_striptail_len(len, str);
+
+ if (qlen != slen)
+ return 1;
+
+ return nls_strnicmp(t, name->name, str, qlen) ? 1 : 0;
+}
+
+/* Dentry operations for case-sensitive mounts. */
+static const struct dentry_operations sdfat_dentry_ops = {
+ .d_revalidate = sdfat_revalidate,
+ .d_hash = sdfat_d_hash,
+ .d_compare = sdfat_cmp,
+};
+
+/* Dentry operations for case-insensitive mounts (NLS-folded hash and
+ * compare).
+ */
+static const struct dentry_operations sdfat_ci_dentry_ops = {
+ .d_revalidate = sdfat_revalidate_ci,
+ .d_hash = sdfat_d_hashi,
+ .d_compare = sdfat_cmpi,
+};
+
+#ifdef CONFIG_SDFAT_DFR
+/*----------------------------------------------------------------------*/
+/* Defragmentation related */
+/*----------------------------------------------------------------------*/
+/**
+ * @fn defrag_cleanup_reqs
+ * @brief clean-up defrag info depending on error flag
+ * @return void
+ * @param sb super block
+ * @param error error flag
+ * @remark on the non-error path, also drops the inode reference taken
+ * by defrag_validate_reqs() via igrab() for each queued inode
+ */
+static void defrag_cleanup_reqs(INOUT struct super_block *sb, IN int error)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ struct defrag_info *sb_dfr = &(sbi->dfr_info);
+ struct defrag_info *ino_dfr = NULL, *tmp = NULL;
+ /* sdfat patch 0.96 : sbi->dfr_info crash problem */
+ __lock_super(sb);
+
+ /* Clean-up ino_dfr */
+ if (!error) {
+ list_for_each_entry_safe(ino_dfr, tmp, &sb_dfr->entry, entry) {
+ struct inode *inode = &(container_of(ino_dfr, struct sdfat_inode_info, dfr_info)->vfs_inode);
+
+ mutex_lock(&ino_dfr->lock);
+
+ atomic_set(&ino_dfr->stat, DFR_INO_STAT_IDLE);
+
+ list_del(&ino_dfr->entry);
+
+ ino_dfr->chunks = NULL;
+ ino_dfr->nr_chunks = 0;
+ INIT_LIST_HEAD(&ino_dfr->entry);
+
+ BUG_ON(!mutex_is_locked(&ino_dfr->lock));
+ mutex_unlock(&ino_dfr->lock);
+
+ iput(inode);
+ }
+ }
+
+ /* Clean-up sb_dfr */
+ sb_dfr->chunks = NULL;
+ sb_dfr->nr_chunks = 0;
+ INIT_LIST_HEAD(&sb_dfr->entry);
+
+ /* Clear dfr_new_clus page */
+ memset(sbi->dfr_new_clus, 0, PAGE_SIZE);
+ sbi->dfr_new_idx = 1;
+ memset(sbi->dfr_page_wb, 0, PAGE_SIZE);
+
+ sbi->dfr_hint_clus = sbi->dfr_hint_idx = sbi->dfr_reserved_clus = 0;
+
+ __unlock_super(sb);
+}
+
+/**
+ * @fn defrag_validate_pages
+ * @brief validate and mark dirty for victim pages
+ * @return 0 on success, -errno otherwise
+ * @param inode inode
+ * @param chunk given chunk
+ * @remark protected by inode_lock and super_lock
+ *
+ * Every page of the chunk must be present in the page cache, lockable,
+ * up-to-date, and neither dirty, under writeback, nor mapped; otherwise
+ * the chunk is rejected with -EINVAL. On success all pages (and their
+ * buffer heads) are marked dirty/delayed so the writeback path moves
+ * them to the new clusters.
+ */
+static int
+defrag_validate_pages(
+ IN struct inode *inode,
+ IN struct defrag_chunk_info *chunk)
+{
+ struct super_block *sb = inode->i_sb;
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ struct page *page = NULL;
+ unsigned int i_size = 0, page_off = 0, page_nr = 0;
+ int buf_i = 0, i = 0, err = 0;
+
+ i_size = i_size_read(inode);
+ page_off = chunk->f_clus * PAGES_PER_CLUS(sb);
+ page_nr = (i_size / PAGE_SIZE) + ((i_size % PAGE_SIZE) ? 1 : 0);
+ if ((i_size <= 0) || (page_nr <= 0)) {
+ dfr_err("inode %p, i_size %d, page_nr %d", inode, i_size, page_nr);
+ return -EINVAL;
+ }
+
+ /* Get victim pages
+ * and check its dirty/writeback/mapped state
+ */
+ for (i = 0;
+ i < min((int)(page_nr - page_off), (int)(chunk->nr_clus * PAGES_PER_CLUS(sb)));
+ i++) {
+ page = find_get_page(inode->i_mapping, page_off + i);
+ /* A page we cannot trylock must be dropped and treated as
+ * missing; blocking here could deadlock against writeback.
+ */
+ if (page && !trylock_page(page)) {
+ put_page(page);
+ page = NULL;
+ }
+
+ if (!page) {
+ dfr_debug("get/lock_page() failed, index %d", i);
+ err = -EINVAL;
+ goto error;
+ }
+
+ sbi->dfr_pagep[buf_i++] = page;
+ if (PageError(page) || !PageUptodate(page) || PageDirty(page) ||
+ PageWriteback(page) || page_mapped(page)) {
+ dfr_debug("page %p, err %d, uptodate %d, "
+ "dirty %d, wb %d, mapped %d",
+ page, PageError(page), PageUptodate(page),
+ PageDirty(page), PageWriteback(page),
+ page_mapped(page));
+ err = -EINVAL;
+ goto error;
+ }
+
+ /* Record which pages of the destination cluster need writeback */
+ set_bit((page->index & (PAGES_PER_CLUS(sb) - 1)),
+ (volatile unsigned long *)&(sbi->dfr_page_wb[chunk->new_idx + i / PAGES_PER_CLUS(sb)]));
+
+ page = NULL;
+ }
+
+ /**
+ * All pages in the chunks are valid.
+ */
+ i_size -= (chunk->f_clus * (sbi->fsi.cluster_size));
+ BUG_ON(((i_size / PAGE_SIZE) + ((i_size % PAGE_SIZE) ? 1 : 0)) != (page_nr - page_off));
+
+ for (i = 0; i < buf_i; i++) {
+ struct buffer_head *bh = NULL, *head = NULL;
+ int bh_idx = 0;
+
+ page = sbi->dfr_pagep[i];
+ BUG_ON(!page);
+
+ /* Mark dirty in page */
+ set_page_dirty(page);
+ mark_page_accessed(page);
+
+ /* Attach empty BHs */
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+
+ /* Mark dirty in BHs */
+ bh = head = page_buffers(page);
+ BUG_ON(!bh && !i_size);
+ do {
+ /* BHs past EOF in the last page must stay clean */
+ if ((bh_idx >= 1) && (bh_idx >= (i_size >> inode->i_blkbits))) {
+ clear_buffer_dirty(bh);
+ } else {
+ if (PageUptodate(page))
+ if (!buffer_uptodate(bh))
+ set_buffer_uptodate(bh);
+
+ /* Set this bh as delay */
+ set_buffer_new(bh);
+ set_buffer_delay(bh);
+
+ mark_buffer_dirty(bh);
+ }
+
+ bh_idx++;
+ bh = bh->b_this_page;
+ } while (bh != head);
+
+ /* Mark this page accessed */
+ mark_page_accessed(page);
+
+ i_size -= PAGE_SIZE;
+ }
+
+error:
+ /* Unlock and put refs for pages */
+ for (i = 0; i < buf_i; i++) {
+ BUG_ON(!sbi->dfr_pagep[i]);
+ unlock_page(sbi->dfr_pagep[i]);
+ put_page(sbi->dfr_pagep[i]);
+ }
+ /* Clear the whole page-pointer scratch buffer. The previous code
+ * used sizeof(PAGE_SIZE), which is the size of the PAGE_SIZE
+ * constant (4/8 bytes) and left stale page pointers behind; the
+ * buffer is one page, as in sdfat_ioctl_defrag_trav().
+ */
+ memset(sbi->dfr_pagep, 0, PAGE_SIZE);
+
+ return err;
+}
+
+
+/**
+ * @fn defrag_validate_reqs
+ * @brief validate defrag requests
+ * @return negative if all requests not valid, 0 otherwise
+ * @param sb super block
+ * @param chunks given chunks
+ * @remark chunks[0] is a header (REQ_HEADER_IDX); each valid chunk is
+ * marked DFR_CHUNK_STAT_REQ, gets reserved clusters and destination
+ * buffer slots, and its inode is pinned via igrab() until
+ * defrag_cleanup_reqs() runs
+ */
+static int
+defrag_validate_reqs(
+ IN struct super_block *sb,
+ INOUT struct defrag_chunk_info *chunks)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ struct defrag_info *sb_dfr = &(sbi->dfr_info);
+ int i = 0, err = 0, err_cnt = 0;
+
+ /* Validate all reqs */
+ for (i = REQ_HEADER_IDX + 1; i < sb_dfr->nr_chunks; i++) {
+ struct defrag_chunk_info *chunk = NULL;
+ struct inode *inode = NULL;
+ struct defrag_info *ino_dfr = NULL;
+
+ chunk = &chunks[i];
+
+ /* Check inode */
+ __lock_super(sb);
+ inode = sdfat_iget(sb, chunk->i_pos);
+ if (!inode) {
+ dfr_debug("inode not found, i_pos %08llx", chunk->i_pos);
+ chunk->stat = DFR_CHUNK_STAT_ERR;
+ err_cnt++;
+ __unlock_super(sb);
+ continue;
+ }
+ __unlock_super(sb);
+
+ dfr_debug("req[%d] inode %p, i_pos %08llx, f_clus %d, "
+ "d_clus %08x, nr %d, prev %08x, next %08x",
+ i, inode, chunk->i_pos, chunk->f_clus, chunk->d_clus,
+ chunk->nr_clus, chunk->prev_clus, chunk->next_clus);
+ /**
+ * Lock ordering: inode_lock -> lock_super
+ */
+ inode_lock(inode);
+ __lock_super(sb);
+
+ /* Check if enough buffers exist for chunk->new_idx */
+ if ((sbi->dfr_new_idx + chunk->nr_clus) >= (PAGE_SIZE / sizeof(int))) {
+ dfr_err("dfr_new_idx %d, chunk->nr_clus %d",
+ sbi->dfr_new_idx, chunk->nr_clus);
+ err = -ENOSPC;
+ goto unlock;
+ }
+
+ /* Reserve clusters for defrag with DA */
+ err = fsapi_dfr_reserve_clus(sb, chunk->nr_clus);
+ if (err)
+ goto unlock;
+
+ /* Check clusters */
+ err = fsapi_dfr_validate_clus(inode, chunk, 0);
+ if (err) {
+ /* Negative count releases the reservation just made */
+ fsapi_dfr_reserve_clus(sb, 0 - chunk->nr_clus);
+ dfr_debug("Cluster validation: err %d", err);
+ goto unlock;
+ }
+
+ /* Check pages */
+ err = defrag_validate_pages(inode, chunk);
+ if (err) {
+ fsapi_dfr_reserve_clus(sb, 0 - chunk->nr_clus);
+ dfr_debug("Page validation: err %d", err);
+ goto unlock;
+ }
+
+ /* Mark IGNORE flag to victim AU */
+ if (sbi->options.improved_allocation & SDFAT_ALLOC_SMART)
+ fsapi_dfr_mark_ignore(sb, chunk->d_clus);
+
+ ino_dfr = &(SDFAT_I(inode)->dfr_info);
+ mutex_lock(&ino_dfr->lock);
+
+ /* Update chunk info */
+ chunk->stat = DFR_CHUNK_STAT_REQ;
+ chunk->new_idx = sbi->dfr_new_idx;
+
+ /* Update ino_dfr info */
+ if (list_empty(&(ino_dfr->entry))) {
+ /* First chunk for this inode: queue it on the sb list
+ * and pin the inode (dropped in defrag_cleanup_reqs).
+ */
+ list_add_tail(&ino_dfr->entry, &sb_dfr->entry);
+ ino_dfr->chunks = chunk;
+ igrab(inode);
+ }
+ ino_dfr->nr_chunks++;
+
+ atomic_set(&ino_dfr->stat, DFR_INO_STAT_REQ);
+
+ BUG_ON(!mutex_is_locked(&ino_dfr->lock));
+ mutex_unlock(&ino_dfr->lock);
+
+ /* Reserved buffers for chunk->new_idx */
+ sbi->dfr_new_idx += chunk->nr_clus;
+
+unlock:
+ if (err) {
+ chunk->stat = DFR_CHUNK_STAT_ERR;
+ err_cnt++;
+ }
+ /* NOTE(review): iput() runs before inode_unlock(); safe only
+ * because a queued inode still holds the igrab() reference —
+ * confirm against the error path.
+ */
+ iput(inode);
+ __unlock_super(sb);
+ inode_unlock(inode);
+ }
+
+ /* Return error if all chunks are invalid */
+ if (err_cnt == sb_dfr->nr_chunks - 1) {
+ dfr_debug("%s failed (err_cnt %d)", __func__, err_cnt);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+
+/**
+ * @fn defrag_check_fs_busy
+ * @brief check if this module busy
+ * @return 0 when idle, 1 otherwise
+ * @param sb super block
+ * @param reserved_clus # of reserved clusters
+ * @param queued_pages # of queued pages
+ * @remark busy means delayed-allocation state is outstanding
+ * (reserved clusters or pages queued for writeback)
+ */
+static int
+defrag_check_fs_busy(
+ IN struct super_block *sb,
+ OUT int *reserved_clus,
+ OUT int *queued_pages)
+{
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ int err = 0;
+
+ *reserved_clus = *queued_pages = 0;
+
+ __lock_super(sb);
+ *reserved_clus = fsi->reserved_clusters;
+ *queued_pages = atomic_read(&SDFAT_SB(sb)->stat_n_pages_queued);
+
+ if (*reserved_clus || *queued_pages)
+ err = 1;
+ __unlock_super(sb);
+
+ return err;
+}
+
+
+/**
+ * @fn sdfat_ioctl_defrag_req
+ * @brief ioctl to send defrag requests
+ * @return 0 on success, -errno otherwise
+ * @param inode inode
+ * @param uarg given requests
+ * @remark serialized against concurrent defrag via sb_dfr->stat
+ * (IDLE -> REQ -> VALID -> IDLE); chunk results are copied back
+ * to userspace before cleanup
+ */
+static int
+sdfat_ioctl_defrag_req(
+ IN struct inode *inode,
+ INOUT unsigned int *uarg)
+{
+ struct super_block *sb = inode->i_sb;
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ struct defrag_info *sb_dfr = &(sbi->dfr_info);
+ struct defrag_chunk_header head;
+ struct defrag_chunk_info *chunks = NULL;
+ unsigned int len = 0;
+ int err = 0;
+ unsigned long timeout = 0;
+
+ /* Check overlapped defrag */
+ if (atomic_cmpxchg(&sb_dfr->stat, DFR_SB_STAT_IDLE, DFR_SB_STAT_REQ)) {
+ dfr_debug("sb_dfr->stat %d", atomic_read(&sb_dfr->stat));
+ return -EBUSY;
+ }
+
+ /* Check if defrag required */
+ __lock_super(sb);
+ if (!fsapi_dfr_check_dfr_required(sb, NULL, NULL, NULL)) {
+ dfr_debug("Not enough space left for defrag (err %d)", -ENOSPC);
+ atomic_set(&sb_dfr->stat, DFR_SB_STAT_IDLE);
+ __unlock_super(sb);
+ return -ENOSPC;
+ }
+ __unlock_super(sb);
+
+ /* Copy args */
+ memset(&head, 0, sizeof(struct defrag_chunk_header));
+ /* NOTE(review): destination is a defrag_chunk_header but the copy
+ * length is sizeof(struct defrag_chunk_info) — confirm the header
+ * type is at least that large (e.g. embedded in the info struct).
+ */
+ err = copy_from_user(&head, uarg, sizeof(struct defrag_chunk_info));
+ ERR_HANDLE(err);
+
+ /* If FS busy, cancel defrag */
+ if (!(head.mode == DFR_MODE_TEST)) {
+ int reserved_clus = 0, queued_pages = 0;
+
+ err = defrag_check_fs_busy(sb, &reserved_clus, &queued_pages);
+ if (err) {
+ dfr_debug("FS busy, cancel defrag (reserved_clus %d, queued_pages %d)",
+ reserved_clus, queued_pages);
+ err = -EBUSY;
+ goto error;
+ }
+ }
+
+ /* Total length is saved in the chunk header's nr_chunks field */
+ len = head.nr_chunks;
+ ERR_HANDLE2(!len, err, -EINVAL);
+
+ dfr_debug("IOC_DFR_REQ started (mode %d, nr_req %d)", head.mode, len - 1);
+ /* Bound the user-supplied count before allocating */
+ if (get_order(len * sizeof(struct defrag_chunk_info)) > MAX_ORDER) {
+ dfr_debug("len %d, sizeof(struct defrag_chunk_info) %lu, MAX_ORDER %d",
+ len, sizeof(struct defrag_chunk_info), MAX_ORDER);
+ err = -EINVAL;
+ goto error;
+ }
+ chunks = alloc_pages_exact(len * sizeof(struct defrag_chunk_info),
+ GFP_KERNEL | __GFP_ZERO);
+ /* NOTE(review): no trailing ';' after ERR_HANDLE2 — confirm the
+ * macro supplies its own statement terminator.
+ */
+ ERR_HANDLE2(!chunks, err, -ENOMEM)
+
+ err = copy_from_user(chunks, uarg, len * sizeof(struct defrag_chunk_info));
+ ERR_HANDLE(err);
+
+ /* Initialize sb_dfr */
+ sb_dfr->chunks = chunks;
+ sb_dfr->nr_chunks = len;
+
+ /* Validate reqs & mark defrag/dirty */
+ err = defrag_validate_reqs(sb, sb_dfr->chunks);
+ ERR_HANDLE(err);
+
+ atomic_set(&sb_dfr->stat, DFR_SB_STAT_VALID);
+
+ /* Wait for defrag completion */
+ if (head.mode == DFR_MODE_ONESHOT)
+ timeout = 0;
+ else if (head.mode & DFR_MODE_BACKGROUND)
+ timeout = DFR_DEFAULT_TIMEOUT;
+ else
+ timeout = DFR_MIN_TIMEOUT;
+
+ dfr_debug("Wait for completion (timeout %ld)", timeout);
+ init_completion(&sbi->dfr_complete);
+ timeout = wait_for_completion_timeout(&sbi->dfr_complete, timeout);
+
+ if (!timeout) {
+ /* Force defrag_updat_fat() after timeout. */
+ dfr_debug("Force sync(), mode %d, left-timeout %ld", head.mode, timeout);
+
+ down_read(&sb->s_umount);
+
+ sync_inodes_sb(sb);
+
+ __lock_super(sb);
+ fsapi_dfr_update_fat_next(sb);
+
+ fsapi_sync_fs(sb, 1);
+
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+ /* SPO test */
+ fsapi_dfr_spo_test(sb, DFR_SPO_FAT_NEXT, __func__);
+#endif
+
+ fsapi_dfr_update_fat_prev(sb, 1);
+ fsapi_sync_fs(sb, 1);
+
+ __unlock_super(sb);
+
+ up_read(&sb->s_umount);
+ }
+
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+ /* SPO test */
+ fsapi_dfr_spo_test(sb, DFR_SPO_NORMAL, __func__);
+#endif
+
+ __lock_super(sb);
+ /* Send DISCARD to clean-ed AUs */
+ fsapi_dfr_check_discard(sb);
+
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+ /* SPO test */
+ fsapi_dfr_spo_test(sb, DFR_SPO_DISCARD, __func__);
+#endif
+
+ /* Unmark IGNORE flag to all victim AUs */
+ fsapi_dfr_unmark_ignore_all(sb);
+ __unlock_super(sb);
+
+ err = copy_to_user(uarg, sb_dfr->chunks, sizeof(struct defrag_chunk_info) * len);
+ ERR_HANDLE(err);
+
+error:
+ /* Clean-up sb_dfr & ino_dfr */
+ defrag_cleanup_reqs(sb, err);
+
+ if (chunks)
+ free_pages_exact(chunks, len * sizeof(struct defrag_chunk_info));
+
+ /* Set sb_dfr's state as IDLE */
+ atomic_set(&sb_dfr->stat, DFR_SB_STAT_IDLE);
+
+ dfr_debug("IOC_DFR_REQ done (err %d)", err);
+ return err;
+}
+
+/**
+ * @fn sdfat_ioctl_defrag_trav
+ * @brief ioctl to traverse given directory for defrag
+ * @return 0 on success, -errno otherwise
+ * @param inode inode
+ * @param uarg output buffer
+ * @remark reuses sbi->dfr_pagep as a one-page in/out argument buffer;
+ * serialized via sb_dfr->stat like sdfat_ioctl_defrag_req()
+ */
+static int
+sdfat_ioctl_defrag_trav(
+ IN struct inode *inode,
+ INOUT unsigned int *uarg)
+{
+ struct super_block *sb = inode->i_sb;
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ struct defrag_info *sb_dfr = &(sbi->dfr_info);
+ struct defrag_trav_arg *args = (struct defrag_trav_arg *) sbi->dfr_pagep;
+ struct defrag_trav_header *header = (struct defrag_trav_header *) args;
+ int err = 0;
+
+ /* Check overlapped defrag */
+ if (atomic_cmpxchg(&sb_dfr->stat, DFR_SB_STAT_IDLE, DFR_SB_STAT_REQ)) {
+ dfr_debug("sb_dfr->stat %d", atomic_read(&sb_dfr->stat));
+ return -EBUSY;
+ }
+
+ /* Check if defrag required */
+ __lock_super(sb);
+ if (!fsapi_dfr_check_dfr_required(sb, NULL, NULL, NULL)) {
+ dfr_debug("Not enough space left for defrag (err %d)", -ENOSPC);
+ atomic_set(&sb_dfr->stat, DFR_SB_STAT_IDLE);
+ __unlock_super(sb);
+ return -ENOSPC;
+ }
+ __unlock_super(sb);
+
+ /* Copy args */
+ err = copy_from_user(args, uarg, PAGE_SIZE);
+ ERR_HANDLE(err);
+
+ /**
+ * Check args.
+ * ROOT directory has i_pos = 0 and start_clus = 0 .
+ */
+ if (!(header->type & DFR_TRAV_TYPE_HEADER)) {
+ err = -EINVAL;
+ dfr_debug("type %d, i_pos %08llx, start_clus %08x",
+ header->type, header->i_pos, header->start_clus);
+ goto error;
+ }
+
+ /* If FS busy, cancel defrag */
+ if (!(header->type & DFR_TRAV_TYPE_TEST)) {
+ /* NOTE(review): declared unsigned here but
+ * defrag_check_fs_busy() takes int * — pointer-sign
+ * mismatch; harmless in practice but worth aligning.
+ */
+ unsigned int reserved_clus = 0, queued_pages = 0;
+
+ err = defrag_check_fs_busy(sb, &reserved_clus, &queued_pages);
+ if (err) {
+ dfr_debug("FS busy, cancel defrag (reserved_clus %d, queued_pages %d)",
+ reserved_clus, queued_pages);
+ err = -EBUSY;
+ goto error;
+ }
+ }
+
+ /* Scan given directory and gather info */
+ inode_lock(inode);
+ __lock_super(sb);
+ err = fsapi_dfr_scan_dir(sb, (void *)args);
+ __unlock_super(sb);
+ inode_unlock(inode);
+ ERR_HANDLE(err);
+
+ /* Copy the result to user */
+ err = copy_to_user(uarg, args, PAGE_SIZE);
+ ERR_HANDLE(err);
+
+error:
+ memset(sbi->dfr_pagep, 0, PAGE_SIZE);
+
+ atomic_set(&sb_dfr->stat, DFR_SB_STAT_IDLE);
+ return err;
+}
+
+/**
+ * @fn sdfat_ioctl_defrag_info
+ * @brief ioctl to get HW param info
+ * @return 0 on success, -errno otherwise
+ * @param sb super block
+ * @param uarg output buffer receiving a struct defrag_info_arg
+ */
+static int
+sdfat_ioctl_defrag_info(
+ IN struct super_block *sb,
+ OUT unsigned int *uarg)
+{
+ struct defrag_info_arg info_arg;
+ int err = 0;
+
+ memset(&info_arg, 0, sizeof(struct defrag_info_arg));
+
+ __lock_super(sb);
+ err = fsapi_dfr_get_info(sb, &info_arg);
+ __unlock_super(sb);
+ ERR_HANDLE(err);
+ dfr_debug("IOC_DFR_INFO: sec_per_au %d, hidden_sectors %d",
+ info_arg.sec_per_au, info_arg.hidden_sectors);
+
+ /* copy_to_user() returns the number of bytes NOT copied; convert a
+  * partial copy to -EFAULT rather than returning the positive count.
+  */
+ err = copy_to_user(uarg, &info_arg, sizeof(struct defrag_info_arg)) ? -EFAULT : 0;
+error:
+ return err;
+}
+
+#endif /* CONFIG_SDFAT_DFR */
+
+/* Map a file cluster offset via the defrag engine; compiles to a no-op
+ * (returns 0) when CONFIG_SDFAT_DFR is disabled.
+ */
+static inline int __do_dfr_map_cluster(struct inode *inode, u32 clu_offset, unsigned int *clus_ptr)
+{
+#ifdef CONFIG_SDFAT_DFR
+ return fsapi_dfr_map_clus(inode, clu_offset, clus_ptr);
+#else
+ return 0;
+#endif
+}
+
+/* Return 1 if a defrag request is active on inode and the byte range
+ * [start, end) overlaps it (query only, cancel flag = 0); 0 otherwise.
+ * Always 0 when CONFIG_SDFAT_DFR is disabled.
+ */
+static inline int __check_dfr_on(struct inode *inode, loff_t start, loff_t end, const char *fname)
+{
+#ifdef CONFIG_SDFAT_DFR
+ struct defrag_info *ino_dfr = &(SDFAT_I(inode)->dfr_info);
+
+ if ((atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ) &&
+ fsapi_dfr_check_dfr_on(inode, start, end, 0, fname))
+ return 1;
+#endif
+ return 0;
+}
+
+/* Cancel any pending defrag on inode overlapping [start, end)
+ * (cancel flag = 1). Called before operations that would invalidate an
+ * in-flight defrag (truncate, unlink, rename, mmap). Always returns 0.
+ */
+static inline int __cancel_dfr_work(struct inode *inode, loff_t start, loff_t end, const char *fname)
+{
+#ifdef CONFIG_SDFAT_DFR
+ struct defrag_info *ino_dfr = &(SDFAT_I(inode)->dfr_info);
+ /* Cancel DEFRAG */
+ if (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ)
+ fsapi_dfr_check_dfr_on(inode, start, end, 1, fname);
+#endif
+ return 0;
+}
+
+/* Writeback-completion hook for the defrag engine: notify it when a page
+ * belonging to an inode with an active defrag request finishes I/O.
+ * Always returns 0; no-op without CONFIG_SDFAT_DFR.
+ */
+static inline int __dfr_writepage_end_io(struct page *page)
+{
+#ifdef CONFIG_SDFAT_DFR
+ struct defrag_info *ino_dfr = &(SDFAT_I(page->mapping->host)->dfr_info);
+
+ if (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ)
+ fsapi_dfr_writepage_endio(page);
+#endif
+ return 0;
+}
+
+/* Zero and initialize the per-inode defrag_info (list head + mutex). */
+static inline void __init_dfr_info(struct inode *inode)
+{
+#ifdef CONFIG_SDFAT_DFR
+ memset(&(SDFAT_I(inode)->dfr_info), 0, sizeof(struct defrag_info));
+ INIT_LIST_HEAD(&(SDFAT_I(inode)->dfr_info.entry));
+ mutex_init(&(SDFAT_I(inode)->dfr_info.lock));
+#endif
+}
+
+/* Allocate the per-superblock defrag working buffers when the "defrag"
+ * mount option is set. Returns 0 on success or -ENOMEM.
+ * NOTE(review): on a partial failure the earlier allocations are NOT
+ * freed here — presumably the mount error path calls
+ * __free_dfr_mem_if_required(); verify against the caller.
+ */
+static inline int __alloc_dfr_mem_if_required(struct super_block *sb)
+{
+#ifdef CONFIG_SDFAT_DFR
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ if (!sbi->options.defrag)
+ return 0;
+
+ memset(&sbi->dfr_info, 0, sizeof(struct defrag_info));
+ INIT_LIST_HEAD(&(sbi->dfr_info.entry));
+ mutex_init(&(sbi->dfr_info.lock));
+
+ sbi->dfr_new_clus = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!sbi->dfr_new_clus) {
+ dfr_debug("error %d", -ENOMEM);
+ return -ENOMEM;
+ }
+ sbi->dfr_new_idx = 1;
+
+ sbi->dfr_page_wb = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!sbi->dfr_page_wb) {
+ dfr_debug("error %d", -ENOMEM);
+ return -ENOMEM;
+ }
+
+ sbi->dfr_pagep = alloc_pages_exact(sizeof(struct page *) *
+ PAGES_PER_AU(sb), GFP_KERNEL | __GFP_ZERO);
+ if (!sbi->dfr_pagep) {
+ dfr_debug("error %d", -ENOMEM);
+ return -ENOMEM;
+ }
+#endif
+ return 0;
+}
+
+/* Release the defrag buffers allocated by __alloc_dfr_mem_if_required().
+ * Safe to call with any subset of the buffers allocated.
+ */
+static void __free_dfr_mem_if_required(struct super_block *sb)
+{
+#ifdef CONFIG_SDFAT_DFR
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ if (sbi->dfr_pagep) {
+ free_pages_exact(sbi->dfr_pagep, sizeof(struct page *) * PAGES_PER_AU(sb));
+ sbi->dfr_pagep = NULL;
+ }
+
+ /* kfree(NULL) is a no-op, so no NULL checks are needed below */
+ kfree(sbi->dfr_page_wb);
+ sbi->dfr_page_wb = NULL;
+
+ kfree(sbi->dfr_new_clus);
+ sbi->dfr_new_clus = NULL;
+#endif
+}
+
+
+/* mmap entry point: cancel any in-flight defrag overlapping the mapped
+ * range before handing off to the generic implementation, since pages
+ * about to be mapped must not be relocated underneath the mapping.
+ */
+static int sdfat_file_mmap(struct file *file, struct vm_area_struct *vm_struct)
+{
+ __cancel_dfr_work(file->f_mapping->host,
+ (loff_t)vm_struct->vm_start,
+ (loff_t)(vm_struct->vm_end - 1),
+ __func__);
+
+ return generic_file_mmap(file, vm_struct);
+}
+
+/* Return the volume serial number of the filesystem containing dir. */
+static int sdfat_ioctl_volume_id(struct inode *dir)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(dir->i_sb);
+ FS_INFO_T *fsi = &(sbi->fsi);
+
+ return fsi->vol_id;
+}
+
+/* Dispatch the defrag ioctls (INFO / TRAV / REQ and, in debug builds,
+ * SPO_FLAG). Each command validates the same preconditions — FAT32
+ * volume, "defrag" mount option, and (for INFO/REQ) that the ioctl is
+ * issued on the mount-point root — before delegating to its handler.
+ * Returns -ENOTTY for commands it does not own so the caller can try
+ * the next dispatcher.
+ */
+static int sdfat_dfr_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+#ifdef CONFIG_SDFAT_DFR
+ switch (cmd) {
+ case SDFAT_IOCTL_DFR_INFO: {
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
+ unsigned int __user *uarg = (unsigned int __user *) arg;
+
+ __lock_super(sb);
+ /* Check FS type (FAT32 only) */
+ if (fsi->vol_type != FAT32) {
+ dfr_err("Defrag not supported, vol_type %d", fsi->vol_type);
+ __unlock_super(sb);
+ return -EPERM;
+
+ }
+
+ /* Check if SB's defrag option enabled */
+ if (!(SDFAT_SB(sb)->options.defrag)) {
+ dfr_err("Defrag not supported, sbi->options.defrag %d", SDFAT_SB(sb)->options.defrag);
+ __unlock_super(sb);
+ return -EPERM;
+ }
+
+ /* Only IOCTL on mount-point allowed */
+ if (filp->f_path.mnt->mnt_root != filp->f_path.dentry) {
+ dfr_err("IOC_DFR_INFO only allowed on ROOT, root %p, dentry %p",
+ filp->f_path.mnt->mnt_root, filp->f_path.dentry);
+ __unlock_super(sb);
+ return -EPERM;
+ }
+ __unlock_super(sb);
+
+ return sdfat_ioctl_defrag_info(sb, uarg);
+ }
+ case SDFAT_IOCTL_DFR_TRAV: {
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
+ unsigned int __user *uarg = (unsigned int __user *) arg;
+
+ __lock_super(sb);
+ /* Check FS type (FAT32 only) */
+ if (fsi->vol_type != FAT32) {
+ dfr_err("Defrag not supported, vol_type %d", fsi->vol_type);
+ __unlock_super(sb);
+ return -EPERM;
+
+ }
+
+ /* Check if SB's defrag option enabled */
+ if (!(SDFAT_SB(sb)->options.defrag)) {
+ dfr_err("Defrag not supported, sbi->options.defrag %d", SDFAT_SB(sb)->options.defrag);
+ __unlock_super(sb);
+ return -EPERM;
+ }
+ __unlock_super(sb);
+
+ return sdfat_ioctl_defrag_trav(inode, uarg);
+ }
+ case SDFAT_IOCTL_DFR_REQ: {
+ struct super_block *sb = inode->i_sb;
+ FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
+ unsigned int __user *uarg = (unsigned int __user *) arg;
+
+ __lock_super(sb);
+
+ /* Check if FS_ERROR occurred */
+ if (sb_rdonly(sb)) {
+ dfr_err("RDONLY partition (err %d)", -EPERM);
+ __unlock_super(sb);
+ return -EPERM;
+ }
+
+ /* Check FS type (FAT32 only) */
+ if (fsi->vol_type != FAT32) {
+ dfr_err("Defrag not supported, vol_type %d", fsi->vol_type);
+ __unlock_super(sb);
+ return -EINVAL;
+
+ }
+
+ /* Check if SB's defrag option enabled */
+ if (!(SDFAT_SB(sb)->options.defrag)) {
+ dfr_err("Defrag not supported, sbi->options.defrag %d", SDFAT_SB(sb)->options.defrag);
+ __unlock_super(sb);
+ return -EPERM;
+ }
+
+ /* Only IOCTL on mount-point allowed.
+  * (message previously said IOC_DFR_INFO — copy-paste fix)
+  */
+ if (filp->f_path.mnt->mnt_root != filp->f_path.dentry) {
+ dfr_err("IOC_DFR_REQ only allowed on ROOT, root %p, dentry %p",
+ filp->f_path.mnt->mnt_root, filp->f_path.dentry);
+ __unlock_super(sb);
+ return -EINVAL;
+ }
+ __unlock_super(sb);
+
+ return sdfat_ioctl_defrag_req(inode, uarg);
+ }
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+ case SDFAT_IOCTL_DFR_SPO_FLAG: {
+ struct sdfat_sb_info *sbi = SDFAT_SB(inode->i_sb);
+ int ret = 0;
+
+ ret = get_user(sbi->dfr_spo_flag, (int __user *)arg);
+ dfr_debug("dfr_spo_flag %d", sbi->dfr_spo_flag);
+
+ return ret;
+ }
+#endif /* CONFIG_SDFAT_DFR_DEBUG */
+ }
+#endif /* CONFIG_SDFAT_DFR */
+
+ /* Inappropriate ioctl for device */
+ return -ENOTTY;
+}
+
+/* Debug-only ioctls: get/set sbi->debug_flags and a test panic trigger.
+ * Setting flags requires CAP_SYS_ADMIN. Returns -ENOTTY for commands it
+ * does not own (and always when CONFIG_SDFAT_DBG_IOCTL is disabled).
+ */
+static int sdfat_dbg_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+#ifdef CONFIG_SDFAT_DBG_IOCTL
+ struct super_block *sb = inode->i_sb;
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ unsigned int flags;
+
+ switch (cmd) {
+ case SDFAT_IOC_GET_DEBUGFLAGS:
+ flags = sbi->debug_flags;
+ return put_user(flags, (int __user *)arg);
+ case SDFAT_IOC_SET_DEBUGFLAGS:
+ flags = 0;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, (int __user *) arg))
+ return -EFAULT;
+
+ __lock_super(sb);
+ sbi->debug_flags = flags;
+ __unlock_super(sb);
+ return 0;
+ case SDFAT_IOCTL_PANIC:
+ panic("ioctl panic for test");
+
+ /* COULD NOT REACH HERE */
+ return 0;
+ }
+#endif /* CONFIG_SDFAT_DBG_IOCTL */
+ return -ENOTTY;
+}
+
+/* Top-level unlocked_ioctl handler: volume-id is answered inline, then
+ * the defrag and debug dispatchers are tried in order. A dispatcher
+ * signals "not my command" with -ENOTTY, which forwards the request to
+ * the next one; the final -ENOTTY propagates to user space.
+ */
+static long sdfat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ int ret;
+
+ if (cmd == SDFAT_IOCTL_GET_VOLUME_ID)
+ return sdfat_ioctl_volume_id(inode);
+
+ ret = sdfat_dfr_ioctl(inode, filp, cmd, arg);
+ if (ret == -ENOTTY)
+ ret = sdfat_dbg_ioctl(inode, filp, cmd, arg);
+
+ return ret;
+}
+
+/* Fill *stat from the inode, then override blksize with the volume's
+ * cluster size (the natural I/O granularity for FAT-family filesystems).
+ * Always returns 0.
+ */
+static int __sdfat_getattr(struct inode *inode, struct kstat *stat)
+{
+ TMSG("%s entered\n", __func__);
+
+ generic_fillattr(inode, stat);
+ stat->blksize = SDFAT_SB(inode->i_sb)->fsi.cluster_size;
+
+ TMSG("%s exited\n", __func__);
+ return 0;
+}
+
+/* Single-page write-bio completion: record any error on the page and its
+ * mapping, notify the defrag engine, update trace counters (when
+ * CONFIG_SDFAT_TRACE_IO), then end writeback and drop the bio.
+ * @err is the bio completion status (0 on success).
+ */
+static void __sdfat_writepage_end_io(struct bio *bio, int err)
+{
+ struct page *page = bio->bi_io_vec->bv_page;
+ struct super_block *sb = page->mapping->host->i_sb;
+
+ ASSERT(bio->bi_vcnt == 1); /* Single page endio */
+ ASSERT(bio_data_dir(bio)); /* Write */
+
+ if (err) {
+ SetPageError(page);
+ mapping_set_error(page->mapping, err);
+ }
+
+ __dfr_writepage_end_io(page);
+
+#ifdef CONFIG_SDFAT_TRACE_IO
+ {
+ //struct sdfat_sb_info *sbi = SDFAT_SB(bio->bi_bdev->bd_super);
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ sbi->stat_n_pages_written++;
+ if (page->mapping->host == sb->s_bdev->bd_inode)
+ sbi->stat_n_bdev_pages_written++;
+
+ /* 4 MB = 1024 pages => 0.4 sec (approx.)
+ * 32 KB = 64 pages => 0.025 sec
+ * Min. average latency b/w msgs. ~= 0.025 sec
+ */
+ if ((sbi->stat_n_pages_written & 63) == 0) {
+ /* NOTE(review): field name "witten" below differs from the
+  * "written" field incremented above — one of the two is
+  * likely a typo; confirm against the sbi declaration.
+  */
+ DMSG("STAT:%u, %u, %u, %u (Sector #: %u)\n",
+ sbi->stat_n_pages_added, sbi->stat_n_pages_written,
+ sbi->stat_n_bdev_pages_witten,
+ sbi->stat_n_pages_confused,
+ (unsigned int)__sdfat_bio_sector(bio));
+ }
+ }
+#endif
+ end_page_writeback(page);
+ bio_put(bio);
+
+ // Update trace info.
+ atomic_dec(&SDFAT_SB(sb)->stat_n_pages_queued);
+}
+
+
+/* Return 1 if synchronous inode writeback is supported for this volume:
+ * requires CONFIG_SDFAT_SUPPORT_DIR_SYNC, and — when delayed metadata
+ * dirtying is also enabled — only exFAT volumes qualify.
+ */
+static int __support_write_inode_sync(struct super_block *sb)
+{
+#ifdef CONFIG_SDFAT_SUPPORT_DIR_SYNC
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ if (sbi->fsi.vol_type != EXFAT)
+ return 0;
+#endif
+ return 1;
+#endif
+ return 0;
+}
+
+
+/* fsync: run the generic file fsync, then — when sync inode writeback is
+ * NOT supported — force a full filesystem sync so metadata still reaches
+ * the media. The first error (generic sync, else fs sync) is returned.
+ */
+static int __sdfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
+{
+ struct inode *inode = filp->f_mapping->host;
+ struct super_block *sb = inode->i_sb;
+ int res, err = 0;
+
+ res = __sdfat_generic_file_fsync(filp, start, end, datasync);
+
+ if (!__support_write_inode_sync(sb))
+ err = fsapi_sync_fs(sb, 1);
+
+ return res ? res : err;
+}
+
+
+/* file_operations for directories; iterate vs. readdir chosen by the
+ * kernel version being built against.
+ */
+static const struct file_operations sdfat_dir_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+ .iterate = sdfat_iterate,
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
+ .readdir = sdfat_readdir,
+#endif
+ .fsync = sdfat_file_fsync,
+ .unlocked_ioctl = sdfat_generic_ioctl,
+};
+
+/* Create a regular file in dir: allocate the on-disk entry via
+ * fsapi_create(), stamp and dirty the parent, build the VFS inode for
+ * the new entry, and instantiate the dentry. Returns 0 or -errno.
+ */
+static int __sdfat_create(struct inode *dir, struct dentry *dentry)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ sdfat_timespec_t ts;
+ FILE_ID_T fid;
+ loff_t i_pos;
+ int err;
+
+ __lock_super(sb);
+
+ TMSG("%s entered\n", __func__);
+
+ ts = current_time(dir);
+
+ err = fsapi_create(dir, (u8 *) dentry->d_name.name, FM_REGULAR, &fid);
+ if (err)
+ goto out;
+
+ /* block d_revalidate until the dentry is fully instantiated */
+ __lock_d_revalidate(dentry);
+
+ inode_inc_iversion(dir);
+ dir->i_ctime = dir->i_mtime = dir->i_atime = ts;
+ if (IS_DIRSYNC(dir))
+ (void) sdfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+
+ i_pos = sdfat_make_i_pos(&fid);
+ inode = sdfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
+ inode_inc_iversion(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ /* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+ d_instantiate(dentry, inode);
+out:
+ __unlock_d_revalidate(dentry);
+ __unlock_super(sb);
+ TMSG("%s exited with err(%d)\n", __func__, err);
+ if (!err)
+ sdfat_statistics_set_create(fid.flags);
+ return err;
+}
+
+
+/* Look up qname in directory dir and fill *fid with the on-disk
+ * location on success. Any failure — including an empty name — is
+ * reported uniformly as -ENOENT.
+ */
+static int sdfat_find(struct inode *dir, struct qstr *qname, FILE_ID_T *fid)
+{
+ /* a zero-length name cannot exist on disk */
+ if (!qname->len)
+ return -ENOENT;
+
+ return fsapi_lookup(dir, (u8 *) qname->name, fid) ? -ENOENT : 0;
+}
+
+/* True for an anonymous disconnected dentry (an NFS-style export root
+ * not yet connected to the dcache tree).
+ */
+static int sdfat_d_anon_disconn(struct dentry *dentry)
+{
+ return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED);
+}
+
+/* VFS ->lookup: find dentry->d_name in dir, build (or reuse) its inode,
+ * read symlink targets into SDFAT_I(inode)->target, and reconcile with
+ * any existing dcache alias before splicing. Returns the dentry to use,
+ * NULL-spliced for a negative lookup, or ERR_PTR on failure.
+ */
+static struct dentry *__sdfat_lookup(struct inode *dir, struct dentry *dentry)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct dentry *alias;
+ int err;
+ FILE_ID_T fid;
+ loff_t i_pos;
+ u64 ret;
+ mode_t i_mode;
+
+ __lock_super(sb);
+ TMSG("%s entered\n", __func__);
+ err = sdfat_find(dir, &dentry->d_name, &fid);
+ if (err) {
+ if (err == -ENOENT) {
+ /* negative lookup: splice a NULL inode below */
+ inode = NULL;
+ goto out;
+ }
+ goto error;
+ }
+
+ i_pos = sdfat_make_i_pos(&fid);
+ inode = sdfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto error;
+ }
+
+ /* lazily load the symlink target (i_size bytes + NUL) on first lookup */
+ i_mode = inode->i_mode;
+ if (S_ISLNK(i_mode) && !SDFAT_I(inode)->target) {
+ SDFAT_I(inode)->target = kmalloc((i_size_read(inode)+1), GFP_KERNEL);
+ if (!SDFAT_I(inode)->target) {
+ err = -ENOMEM;
+ goto error;
+ }
+ fsapi_read_link(dir, &fid, SDFAT_I(inode)->target, i_size_read(inode), &ret);
+ *(SDFAT_I(inode)->target + i_size_read(inode)) = '\0';
+ }
+
+ alias = d_find_alias(inode);
+
+ /*
+ * Checking "alias->d_parent == dentry->d_parent" to make sure
+ * FS is not corrupted (especially double linked dir).
+ */
+ if (alias && alias->d_parent == dentry->d_parent &&
+ !sdfat_d_anon_disconn(alias)) {
+
+ /*
+ * Unhashed alias is able to exist because of revalidate()
+ * called by lookup_fast. You can easily make this status
+ * by calling create and lookup concurrently
+ * In such case, we reuse an alias instead of new dentry
+ */
+ if (d_unhashed(alias)) {
+ BUG_ON(alias->d_name.hash_len != dentry->d_name.hash_len);
+ sdfat_msg(sb, KERN_INFO, "rehashed a dentry(%p) "
+ "in read lookup", alias);
+ d_drop(dentry);
+ d_rehash(alias);
+ } else if (!S_ISDIR(i_mode)) {
+ /*
+ * This inode has non anonymous-DCACHE_DISCONNECTED
+ * dentry. This means, the user did ->lookup() by an
+ * another name (longname vs 8.3 alias of it) in past.
+ *
+ * Switch to new one for reason of locality if possible.
+ */
+ d_move(alias, dentry);
+ }
+ /* drop the d_find_alias() inode reference; alias keeps its own */
+ iput(inode);
+ __unlock_super(sb);
+ TMSG("%s exited\n", __func__);
+ return alias;
+ }
+ dput(alias);
+out:
+ /* initialize d_time even though it is positive dentry */
+ dentry->d_time = (unsigned long)inode_peek_iversion(dir);
+ __unlock_super(sb);
+
+ dentry = d_splice_alias(inode, dentry);
+
+ TMSG("%s exited\n", __func__);
+ return dentry;
+error:
+ __unlock_super(sb);
+ TMSG("%s exited with err(%d)\n", __func__, err);
+ return ERR_PTR(err);
+}
+
+
+/* VFS ->unlink: cancel any pending defrag on the victim, remove the
+ * on-disk entry, stamp/dirty the parent, then drop the inode's link
+ * count and detach it from the hash. Returns 0 or -errno.
+ */
+static int sdfat_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb = dir->i_sb;
+ sdfat_timespec_t ts;
+ int err;
+
+ __lock_super(sb);
+
+ TMSG("%s entered\n", __func__);
+
+ ts = current_time(dir);
+
+ SDFAT_I(inode)->fid.size = i_size_read(inode);
+
+ __cancel_dfr_work(inode, 0, SDFAT_I(inode)->fid.size, __func__);
+
+ err = fsapi_unlink(dir, &(SDFAT_I(inode)->fid));
+ if (err)
+ goto out;
+
+ __lock_d_revalidate(dentry);
+
+ inode_inc_iversion(dir);
+ dir->i_mtime = dir->i_atime = ts;
+ if (IS_DIRSYNC(dir))
+ (void) sdfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+
+ clear_nlink(inode);
+ inode->i_mtime = inode->i_atime = ts;
+ sdfat_detach(inode);
+ dentry->d_time = (unsigned long)inode_peek_iversion(dir);
+out:
+ __unlock_d_revalidate(dentry);
+ __unlock_super(sb);
+ TMSG("%s exited with err(%d)\n", __func__, err);
+ return err;
+}
+
+/* VFS ->symlink: create a symlink entry holding `target`. Requires the
+ * "symlink" mount option (-ENOTSUPP otherwise). The link body is written
+ * on disk and cached (NUL-terminated) in SDFAT_I(inode)->target.
+ * Returns 0 or -errno.
+ */
+static int sdfat_symlink(struct inode *dir, struct dentry *dentry, const char *target)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ sdfat_timespec_t ts;
+ FILE_ID_T fid;
+ loff_t i_pos;
+ int err;
+ u64 len = (u64) strlen(target);
+ u64 ret;
+
+ /* symlink option check */
+ if (!SDFAT_SB(sb)->options.symlink)
+ return -ENOTSUPP;
+
+ __lock_super(sb);
+
+ TMSG("%s entered\n", __func__);
+
+ ts = current_time(dir);
+
+ err = fsapi_create(dir, (u8 *) dentry->d_name.name, FM_SYMLINK, &fid);
+ if (err)
+ goto out;
+
+ err = fsapi_write_link(dir, &fid, (char *) target, len, &ret);
+
+ if (err) {
+ /* writing the link body failed: undo the created entry */
+ fsapi_remove(dir, &fid);
+ goto out;
+ }
+
+ __lock_d_revalidate(dentry);
+
+ inode_inc_iversion(dir);
+ dir->i_ctime = dir->i_mtime = dir->i_atime = ts;
+ if (IS_DIRSYNC(dir))
+ (void) sdfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+
+ i_pos = sdfat_make_i_pos(&fid);
+ inode = sdfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
+ inode_inc_iversion(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ /* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+ /* cache the target (len bytes + trailing NUL copied from `target`) */
+ SDFAT_I(inode)->target = kmalloc((len+1), GFP_KERNEL);
+ if (!SDFAT_I(inode)->target) {
+ err = -ENOMEM;
+ goto out;
+ }
+ memcpy(SDFAT_I(inode)->target, target, len+1);
+
+ d_instantiate(dentry, inode);
+out:
+ __unlock_d_revalidate(dentry);
+ __unlock_super(sb);
+ TMSG("%s exited with err(%d)\n", __func__, err);
+ return err;
+}
+
+
+/* VFS ->mkdir core: create the on-disk directory, stamp/dirty the
+ * parent and bump its link count (for the new "..`"), then build and
+ * instantiate the child inode. Returns 0 or -errno.
+ */
+static int __sdfat_mkdir(struct inode *dir, struct dentry *dentry)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ sdfat_timespec_t ts;
+ FILE_ID_T fid;
+ loff_t i_pos;
+ int err;
+
+ __lock_super(sb);
+
+ TMSG("%s entered\n", __func__);
+
+ ts = current_time(dir);
+
+ err = fsapi_mkdir(dir, (u8 *) dentry->d_name.name, &fid);
+ if (err)
+ goto out;
+
+ __lock_d_revalidate(dentry);
+
+ inode_inc_iversion(dir);
+ dir->i_ctime = dir->i_mtime = dir->i_atime = ts;
+ if (IS_DIRSYNC(dir))
+ (void) sdfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+ inc_nlink(dir);
+
+ i_pos = sdfat_make_i_pos(&fid);
+ inode = sdfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
+ inode_inc_iversion(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ /* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+ d_instantiate(dentry, inode);
+
+out:
+ __unlock_d_revalidate(dentry);
+ __unlock_super(sb);
+ TMSG("%s exited with err(%d)\n", __func__, err);
+ if (!err)
+ sdfat_statistics_set_mkdir(fid.flags);
+ return err;
+}
+
+
+/* VFS ->rmdir: remove the (empty) directory on disk, stamp/dirty the
+ * parent and drop its link count, then zero the victim's nlink and
+ * detach it. Returns 0 or -errno (e.g. -ENOTEMPTY from fsapi_rmdir).
+ */
+static int sdfat_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb = dir->i_sb;
+ sdfat_timespec_t ts;
+ int err;
+
+ __lock_super(sb);
+
+ TMSG("%s entered\n", __func__);
+
+ ts = current_time(dir);
+
+ SDFAT_I(inode)->fid.size = i_size_read(inode);
+
+ err = fsapi_rmdir(dir, &(SDFAT_I(inode)->fid));
+ if (err)
+ goto out;
+
+ __lock_d_revalidate(dentry);
+
+ inode_inc_iversion(dir);
+ dir->i_mtime = dir->i_atime = ts;
+ if (IS_DIRSYNC(dir))
+ (void) sdfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+ drop_nlink(dir);
+
+ clear_nlink(inode);
+ inode->i_mtime = inode->i_atime = ts;
+ sdfat_detach(inode);
+ dentry->d_time = (unsigned long)inode_peek_iversion(dir);
+out:
+ __unlock_d_revalidate(dentry);
+ __unlock_super(sb);
+ TMSG("%s exited with err(%d)\n", __func__, err);
+ return err;
+}
+
+/* VFS ->rename core (no RENAME_EXCHANGE/NOREPLACE flags here): move
+ * old_dentry to new_dentry, re-attach the moved inode at its new i_pos,
+ * fix directory link counts, and drop the overwritten target if any.
+ * Returns 0 or -errno.
+ */
+static int __sdfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct inode *old_inode, *new_inode;
+ struct super_block *sb = old_dir->i_sb;
+ sdfat_timespec_t ts;
+ loff_t i_pos;
+ int err;
+
+ __lock_super(sb);
+
+ TMSG("%s entered\n", __func__);
+
+ old_inode = old_dentry->d_inode;
+ new_inode = new_dentry->d_inode;
+
+ ts = current_time(old_inode);
+
+ SDFAT_I(old_inode)->fid.size = i_size_read(old_inode);
+
+ /* a rename invalidates any in-flight defrag of the moved inode */
+ __cancel_dfr_work(old_inode, 0, 1, __func__);
+
+ err = fsapi_rename(old_dir, &(SDFAT_I(old_inode)->fid), new_dir, new_dentry);
+ if (err)
+ goto out;
+
+ __lock_d_revalidate(old_dentry);
+ __lock_d_revalidate(new_dentry);
+
+ inode_inc_iversion(new_dir);
+ new_dir->i_ctime = new_dir->i_mtime = new_dir->i_atime = ts;
+ if (IS_DIRSYNC(new_dir))
+ (void) sdfat_sync_inode(new_dir);
+ else
+ mark_inode_dirty(new_dir);
+
+ /* the entry moved on disk: re-hash the inode under its new i_pos */
+ i_pos = sdfat_make_i_pos(&(SDFAT_I(old_inode)->fid));
+ sdfat_detach(old_inode);
+ sdfat_attach(old_inode, i_pos);
+ if (IS_DIRSYNC(new_dir))
+ (void) sdfat_sync_inode(old_inode);
+ else
+ mark_inode_dirty(old_inode);
+
+ /* a dir moved across parents shifts one ".." link between them */
+ if ((S_ISDIR(old_inode->i_mode)) && (old_dir != new_dir)) {
+ drop_nlink(old_dir);
+ if (!new_inode)
+ inc_nlink(new_dir);
+ }
+
+ inode_inc_iversion(old_dir);
+ old_dir->i_ctime = old_dir->i_mtime = ts;
+ if (IS_DIRSYNC(old_dir))
+ (void) sdfat_sync_inode(old_dir);
+ else
+ mark_inode_dirty(old_dir);
+
+ if (new_inode) {
+ sdfat_detach(new_inode);
+
+ /* skip drop_nlink if new_inode already has been dropped */
+ if (new_inode->i_nlink) {
+ drop_nlink(new_inode);
+ if (S_ISDIR(new_inode->i_mode))
+ drop_nlink(new_inode);
+ } else {
+ EMSG("%s : abnormal access to an inode dropped\n",
+ __func__);
+ WARN_ON(new_inode->i_nlink == 0);
+ }
+ new_inode->i_ctime = ts;
+#if 0
+ (void) sdfat_sync_inode(new_inode);
+#endif
+ }
+
+out:
+ __unlock_d_revalidate(old_dentry);
+ __unlock_d_revalidate(new_dentry);
+ __unlock_super(sb);
+ TMSG("%s exited with err(%d)\n", __func__, err);
+ return err;
+}
+
+/* Grow the file to `size` by zero-extension, update timestamps, and —
+ * for O_SYNC inodes — write the newly exposed range and the inode out
+ * synchronously. Returns 0 or the first error encountered.
+ */
+static int sdfat_cont_expand(struct inode *inode, loff_t size)
+{
+ struct address_space *mapping = inode->i_mapping;
+ loff_t start = i_size_read(inode), count = size - i_size_read(inode);
+ int err, err2;
+
+ err = generic_cont_expand_simple(inode, size);
+ if (err)
+ return err;
+
+ inode->i_ctime = inode->i_mtime = current_time(inode);
+ mark_inode_dirty(inode);
+
+ if (!IS_SYNC(inode))
+ return 0;
+
+ /* keep the first failure but still attempt each sync step */
+ err = filemap_fdatawrite_range(mapping, start, start + count - 1);
+ err2 = sync_mapping_buffers(mapping);
+ err = (err)?(err):(err2);
+ err2 = write_inode_now(inode, 1);
+ err = (err)?(err):(err2);
+ if (err)
+ return err;
+
+ return filemap_fdatawait_range(mapping, start, start + count - 1);
+}
+
+/* Return 1 if the current (non-owner) user may set timestamps per the
+ * allow_utime mount mask: group bits are used for group members, other
+ * bits for everyone else, and write permission in the chosen triad
+ * grants utime. Returns 0 to fall back to the VFS default checks.
+ */
+static int sdfat_allow_set_time(struct sdfat_sb_info *sbi, struct inode *inode)
+{
+ mode_t allow_utime = sbi->options.allow_utime;
+
+ if (!uid_eq(current_fsuid(), inode->i_uid)) {
+ if (in_group_p(inode->i_gid))
+ allow_utime >>= 3;
+ if (allow_utime & MAY_WRITE)
+ return 1;
+ }
+
+ /* use a default check */
+ return 0;
+}
+
+/* Constrain a chmod request to what FAT attributes can represent: r/x
+ * bits must stay as-is, and w bits may only flip between "all writable
+ * (per umask)" and "none" via the read-only attribute. On success,
+ * *mode_ptr is rewritten to the representable mode; -EPERM otherwise.
+ */
+static int sdfat_sanitize_mode(const struct sdfat_sb_info *sbi,
+ struct inode *inode, umode_t *mode_ptr)
+{
+ mode_t i_mode, mask, perm;
+
+ i_mode = inode->i_mode;
+
+ if (S_ISREG(i_mode) || S_ISLNK(i_mode))
+ mask = sbi->options.fs_fmask;
+ else
+ mask = sbi->options.fs_dmask;
+
+ perm = *mode_ptr & ~(S_IFMT | mask);
+
+ /* Of the r and x bits, all (subject to umask) must be present.*/
+ if ((perm & (S_IRUGO | S_IXUGO)) != (i_mode & (S_IRUGO | S_IXUGO)))
+ return -EPERM;
+
+ if (sdfat_mode_can_hold_ro(inode)) {
+ /* Of the w bits, either all (subject to umask) or none must be present. */
+ if ((perm & S_IWUGO) && ((perm & S_IWUGO) != (S_IWUGO & ~mask)))
+ return -EPERM;
+ } else {
+ /* If sdfat_mode_can_hold_ro(inode) is false, can't change w bits. */
+ if ((perm & S_IWUGO) != (S_IWUGO & ~mask))
+ return -EPERM;
+ }
+
+ *mode_ptr &= S_IFMT | perm;
+
+ return 0;
+}
+
+/*
+ * sdfat_block_truncate_page() zeroes out a mapping from file offset `from'
+ * up to the end of the block which corresponds to `from'.
+ * This is required during truncate to physically zeroout the tail end
+ * of that block so it doesn't yield old data if the file is later grown.
+ * Also, avoid causing failure from fsx for cases of "data past EOF"
+ */
+static int sdfat_block_truncate_page(struct inode *inode, loff_t from)
+{
+ return block_truncate_page(inode->i_mapping, from, sdfat_get_block);
+}
+
+/* VFS ->setattr: expand-to-size is handled first (then removed from the
+ * request), utime permission is relaxed per the allow_utime mount
+ * option, owner/group/mode changes outside the mount defaults are
+ * rejected (-EPERM), modes are sanitized to FAT-representable ones, and
+ * shrink-truncates zero the tail block before __sdfat_do_truncate().
+ */
+static int sdfat_setattr(struct dentry *dentry, struct iattr *attr)
+{
+
+ struct sdfat_sb_info *sbi = SDFAT_SB(dentry->d_sb);
+ struct inode *inode = dentry->d_inode;
+ unsigned int ia_valid;
+ int error;
+ loff_t old_size;
+
+ TMSG("%s entered\n", __func__);
+
+ /* growing: zero-extend now, then drop ATTR_SIZE from the request */
+ if ((attr->ia_valid & ATTR_SIZE)
+ && (attr->ia_size > i_size_read(inode))) {
+ error = sdfat_cont_expand(inode, attr->ia_size);
+ if (error || attr->ia_valid == ATTR_SIZE)
+ goto out;
+ attr->ia_valid &= ~ATTR_SIZE;
+ }
+
+ /* Check for setting the inode time.
+  * Temporarily clear the SET bits so setattr_prepare() skips its
+  * ownership check when allow_utime grants permission; restored after.
+  */
+ ia_valid = attr->ia_valid;
+ if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET))
+ && sdfat_allow_set_time(sbi, inode)) {
+ attr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET);
+ }
+
+ error = setattr_prepare(dentry, attr);
+ attr->ia_valid = ia_valid;
+ if (error)
+ goto out;
+
+ if (((attr->ia_valid & ATTR_UID) &&
+ (!uid_eq(attr->ia_uid, sbi->options.fs_uid))) ||
+ ((attr->ia_valid & ATTR_GID) &&
+ (!gid_eq(attr->ia_gid, sbi->options.fs_gid))) ||
+ ((attr->ia_valid & ATTR_MODE) &&
+ (attr->ia_mode & ~(S_IFREG | S_IFLNK | S_IFDIR | S_IRWXUGO)))) {
+ error = -EPERM;
+ goto out;
+ }
+
+ /*
+ * We don't return -EPERM here. Yes, strange, but this is too
+ * old behavior.
+ */
+ if (attr->ia_valid & ATTR_MODE) {
+ if (sdfat_sanitize_mode(sbi, inode, &attr->ia_mode) < 0)
+ attr->ia_valid &= ~ATTR_MODE;
+ }
+
+ SDFAT_I(inode)->fid.size = i_size_read(inode);
+
+ /* patch 1.2.0 : fixed the problem of size mismatch. */
+ if (attr->ia_valid & ATTR_SIZE) {
+ error = sdfat_block_truncate_page(inode, attr->ia_size);
+ if (error)
+ goto out;
+
+ old_size = i_size_read(inode);
+
+ /* TO CHECK evicting directory works correctly */
+ MMSG("%s: inode(%p) truncate size (%llu->%llu)\n", __func__,
+ inode, (u64)old_size, (u64)attr->ia_size);
+ __sdfat_do_truncate(inode, old_size, attr->ia_size);
+ }
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+out:
+ TMSG("%s exited with err(%d)\n", __func__, error);
+ return error;
+}
+
+/* inode_operations for directories. */
+static const struct inode_operations sdfat_dir_inode_operations = {
+ .create = sdfat_create,
+ .lookup = sdfat_lookup,
+ .unlink = sdfat_unlink,
+ .symlink = sdfat_symlink,
+ .mkdir = sdfat_mkdir,
+ .rmdir = sdfat_rmdir,
+ .rename = sdfat_rename,
+ .setattr = sdfat_setattr,
+ .getattr = sdfat_getattr,
+#ifdef CONFIG_SDFAT_VIRTUAL_XATTR
+ .listxattr = sdfat_listxattr,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+ .setxattr = sdfat_setxattr,
+ .getxattr = sdfat_getxattr,
+ .removexattr = sdfat_removexattr,
+#endif
+#endif
+};
+
+/*======================================================================*/
+/* File Operations */
+/*======================================================================*/
+/* inode_operations for symlinks; get_link vs. follow_link chosen by the
+ * kernel version being built against.
+ */
+static const struct inode_operations sdfat_symlink_inode_operations = {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+ .readlink = generic_readlink,
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+ .get_link = sdfat_follow_link,
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) */
+ .follow_link = sdfat_follow_link,
+#endif
+#ifdef CONFIG_SDFAT_VIRTUAL_XATTR
+ .listxattr = sdfat_listxattr,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+ .setxattr = sdfat_setxattr,
+ .getxattr = sdfat_getxattr,
+ .removexattr = sdfat_removexattr,
+#endif
+#endif
+};
+
+/* VFS ->release (last close of a file description): refresh the cached
+ * fid size from i_size and kick a non-blocking filesystem sync.
+ */
+static int sdfat_file_release(struct inode *inode, struct file *filp)
+{
+ struct super_block *sb = inode->i_sb;
+
+ /* Moved below code from sdfat_write_inode
+ * TO FIX size-mismatch problem.
+ */
+ /* FIXME : Added bug_on to confirm that there is no size mismatch */
+ sdfat_debug_bug_on(SDFAT_I(inode)->fid.size != i_size_read(inode));
+ SDFAT_I(inode)->fid.size = i_size_read(inode);
+ fsapi_sync_fs(sb, 0);
+ return 0;
+}
+
+/* file_operations for regular files; read/write entry points chosen by
+ * the kernel version being built against.
+ */
+static const struct file_operations sdfat_file_operations = {
+ .llseek = generic_file_llseek,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) */
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+#endif
+ .mmap = sdfat_file_mmap,
+ .release = sdfat_file_release,
+ .unlocked_ioctl = sdfat_generic_ioctl,
+ .fsync = sdfat_file_fsync,
+ .splice_read = generic_file_splice_read,
+};
+
+static const struct address_space_operations sdfat_da_aops;
+static const struct address_space_operations sdfat_aops;
+
+/* Shrink the on-disk allocation to the (already reduced) i_size: cancel
+ * overlapping defrag work, free clusters via fsapi_truncate(), stamp
+ * the inode, recompute i_blocks in cluster granularity, and pull the
+ * ondisk/aligned size trackers down to the block-aligned new size.
+ * @old_size is the pre-truncate file size.
+ */
+static void sdfat_truncate(struct inode *inode, loff_t old_size)
+{
+ struct super_block *sb = inode->i_sb;
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ FS_INFO_T *fsi = &(sbi->fsi);
+ unsigned int blocksize = 1 << inode->i_blkbits;
+ loff_t aligned_size;
+ int err;
+
+ __lock_super(sb);
+
+ if (SDFAT_I(inode)->fid.start_clu == 0) {
+ /* Strange state:
+ * Empty start_clu != ~0 (not allocated)
+ */
+ sdfat_fs_error(sb, "tried to truncate zeroed cluster.");
+ goto out;
+ }
+
+ sdfat_debug_check_clusters(inode);
+
+ __cancel_dfr_work(inode, (loff_t)i_size_read(inode), (loff_t)old_size, __func__);
+
+ err = fsapi_truncate(inode, old_size, i_size_read(inode));
+ if (err)
+ goto out;
+
+ inode->i_ctime = inode->i_mtime = current_time(inode);
+ if (IS_DIRSYNC(inode))
+ (void) sdfat_sync_inode(inode);
+ else
+ mark_inode_dirty(inode);
+
+ // FIXME: needs verification (translated from Korean)
+ // inode->i_blocks = ((SDFAT_I(inode)->i_size_ondisk + (fsi->cluster_size - 1))
+ inode->i_blocks = ((i_size_read(inode) + (fsi->cluster_size - 1)) &
+ ~((loff_t)fsi->cluster_size - 1)) >> inode->i_blkbits;
+out:
+ /*
+ * This protects against truncating a file bigger than it was then
+ * trying to write into the hole.
+ *
+ * comment by sh.hong:
+ * This seems to mean 'intra page/block' truncate and writing.
+ * I couldn't find a reason to change the values prior to fsapi_truncate
+ * Therefore, I switched the order of operations
+ * so that it's possible to utilize i_size_ondisk in fsapi_truncate
+ */
+
+ /* round i_size up to the next block boundary */
+ aligned_size = i_size_read(inode);
+ if (aligned_size & (blocksize - 1)) {
+ aligned_size |= (blocksize - 1);
+ aligned_size++;
+ }
+
+ if (SDFAT_I(inode)->i_size_ondisk > i_size_read(inode))
+ SDFAT_I(inode)->i_size_ondisk = aligned_size;
+
+ sdfat_debug_check_clusters(inode);
+
+ if (SDFAT_I(inode)->i_size_aligned > i_size_read(inode))
+ SDFAT_I(inode)->i_size_aligned = aligned_size;
+
+ /* After truncation :
+ * 1) Delayed allocation is OFF
+ * i_size = i_size_ondisk <= i_size_aligned
+ * (useless size var.)
+ * (block-aligned)
+ * 2) Delayed allocation is ON
+ * i_size = i_size_ondisk = i_size_aligned
+ * (will be block-aligned after write)
+ * or
+ * i_size_ondisk < i_size <= i_size_aligned (block_aligned)
+ * (will be block-aligned after write)
+ */
+
+ __unlock_super(sb);
+}
+
+/* inode_operations for regular files. */
+static const struct inode_operations sdfat_file_inode_operations = {
+ .setattr = sdfat_setattr,
+ .getattr = sdfat_getattr,
+#ifdef CONFIG_SDFAT_VIRTUAL_XATTR
+ .listxattr = sdfat_listxattr,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+ .setxattr = sdfat_setxattr,
+ .getxattr = sdfat_getxattr,
+ .removexattr = sdfat_removexattr,
+#endif
+#endif
+};
+
+/*======================================================================*/
+/* Address Space Operations */
+/*======================================================================*/
+/* 2-level option flag */
+#define BMAP_NOT_CREATE 0
+#define BMAP_ADD_BLOCK 1
+#define BMAP_ADD_CLUSTER 2
+#define BLOCK_ADDED(bmap_ops) (bmap_ops)
+/* Map logical block `sector` of inode to a physical sector.
+ * On return *phys is the device sector (0 = unmapped), *mapped_blocks
+ * the number of contiguous sectors available from *phys, and *create is
+ * downgraded to BMAP_NOT_CREATE when the block already existed.
+ * FAT12/16 root directories are mapped directly from the fixed root
+ * area. Returns 0, -ENOSPC, or -EIO.
+ */
+static int sdfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
+ unsigned long *mapped_blocks, int *create)
+{
+ struct super_block *sb = inode->i_sb;
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ FS_INFO_T *fsi = &(sbi->fsi);
+ const unsigned long blocksize = sb->s_blocksize;
+ const unsigned char blocksize_bits = sb->s_blocksize_bits;
+ sector_t last_block;
+ unsigned int cluster, clu_offset, sec_offset;
+ int err = 0;
+
+ *phys = 0;
+ *mapped_blocks = 0;
+
+ /* core code should handle EIO */
+#if 0
+ if (fsi->prev_eio && BLOCK_ADDED(*create))
+ return -EIO;
+#endif
+
+ /* FAT12/16 root dir lives in a fixed sector range, not in clusters */
+ if (((fsi->vol_type == FAT12) || (fsi->vol_type == FAT16)) &&
+ (inode->i_ino == SDFAT_ROOT_INO)) {
+ if (sector < (fsi->dentries_in_root >>
+ (sb->s_blocksize_bits - DENTRY_SIZE_BITS))) {
+ *phys = sector + fsi->root_start_sector;
+ *mapped_blocks = 1;
+ }
+ return 0;
+ }
+
+ last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
+ if ((sector >= last_block) && (*create == BMAP_NOT_CREATE))
+ return 0;
+
+ /* Is this block already allocated? */
+ clu_offset = sector >> fsi->sect_per_clus_bits; /* cluster offset */
+
+ SDFAT_I(inode)->fid.size = i_size_read(inode);
+
+
+ /* route through the defrag engine if this cluster range is being
+  * defragged; otherwise map (optionally allocating) via fsapi
+  */
+ if (unlikely(__check_dfr_on(inode,
+ (loff_t)((loff_t)clu_offset << fsi->cluster_size_bits),
+ (loff_t)((loff_t)(clu_offset + 1) << fsi->cluster_size_bits),
+ __func__))) {
+ err = __do_dfr_map_cluster(inode, clu_offset, &cluster);
+ } else {
+ if (*create & BMAP_ADD_CLUSTER)
+ err = fsapi_map_clus(inode, clu_offset, &cluster, 1);
+ else
+ err = fsapi_map_clus(inode, clu_offset, &cluster, ALLOC_NOWHERE);
+ }
+
+ if (err) {
+ if (err != -ENOSPC)
+ return -EIO;
+ return err;
+ }
+
+ /* FOR BIGDATA */
+ sdfat_statistics_set_rw(SDFAT_I(inode)->fid.flags,
+ clu_offset, *create & BMAP_ADD_CLUSTER);
+
+ if (!IS_CLUS_EOF(cluster)) {
+ /* sector offset in cluster */
+ sec_offset = sector & (fsi->sect_per_clus - 1);
+
+ *phys = CLUS_TO_SECT(fsi, cluster) + sec_offset;
+ *mapped_blocks = fsi->sect_per_clus - sec_offset;
+ }
+#if 0
+ else {
+ /* Debug purpose (new clu needed) */
+ ASSERT((*create & BMAP_ADD_CLUSTER) == 0);
+ ASSERT(sector >= last_block);
+ }
+#endif
+
+ if (sector < last_block)
+ *create = BMAP_NOT_CREATE;
+#if 0
+ else if (sector >= last_block)
+ *create = non-zero;
+
+ if (iblock <= last mapped-block)
+ *phys != 0
+ *create = BMAP_NOT_CREATE
+ else if (iblock <= last cluster)
+ *phys != 0
+ *create = non-zero
+#endif
+ return 0;
+}
+
+/*
+ * get_block-style callback for the delayed-allocation write path
+ * (FAT32 only). Existing blocks are mapped directly; a missing block is
+ * given a dummy mapping with BH_Delay set, and a cluster reservation is
+ * taken when the block is the first sector of a new cluster. Maintains
+ * the invariant i_size <= i_size_aligned, with i_size_aligned growing
+ * block-by-block ahead of the actual allocation.
+ */
+static int sdfat_da_prep_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ struct super_block *sb = inode->i_sb;
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ FS_INFO_T *fsi = &(sbi->fsi);
+ unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
+ unsigned long mapped_blocks;
+ sector_t phys;
+ loff_t pos;
+ int sec_offset;
+ int bmap_create = create ? BMAP_ADD_BLOCK : BMAP_NOT_CREATE;
+ int err = 0;
+
+ __lock_super(sb);
+
+ /* FAT32 only */
+ ASSERT(fsi->vol_type == FAT32);
+
+ err = sdfat_bmap(inode, iblock, &phys, &mapped_blocks, &bmap_create);
+ if (err) {
+ if (err != -ENOSPC)
+ sdfat_fs_error_ratelimit(sb, "%s: failed to bmap "
+ "(iblock:%u, err:%d)", __func__,
+ (u32)iblock, err);
+ goto unlock_ret;
+ }
+
+ sec_offset = iblock & (fsi->sect_per_clus - 1);
+
+ if (phys) {
+ /* the block in in the mapped cluster boundary */
+ max_blocks = min(mapped_blocks, max_blocks);
+ map_bh(bh_result, sb, phys);
+
+ BUG_ON(BLOCK_ADDED(bmap_create) && (sec_offset == 0));
+
+ } else if (create == 1) {
+ /* Not exist: new cluster needed */
+ if (!BLOCK_ADDED(bmap_create)) {
+ sector_t last_block;
+ last_block = (i_size_read(inode) + (sb->s_blocksize - 1))
+ >> sb->s_blocksize_bits;
+ sdfat_fs_error(sb, "%s: new cluster need, but "
+ "bmap_create == BMAP_NOT_CREATE(iblock:%lld, "
+ "last_block:%lld)", __func__,
+ (s64)iblock, (s64)last_block);
+ err = -EIO;
+ goto unlock_ret;
+ }
+
+ // Reserved Cluster (only if iblock is the first sector in a clu)
+ if (sec_offset == 0) {
+ err = fsapi_reserve_clus(inode);
+ if (err) {
+ if (err != -ENOSPC)
+ sdfat_fs_error_ratelimit(sb,
+ "%s: failed to bmap "
+ "(iblock:%u, err:%d)", __func__,
+ (u32)iblock, err);
+
+ goto unlock_ret;
+ }
+ }
+
+ // Delayed mapping
+ map_bh(bh_result, sb, ~((sector_t) 0xffff));
+ set_buffer_new(bh_result);
+ set_buffer_delay(bh_result);
+
+ } else {
+ /* get_block on non-existing addr. with create==0 */
+ /*
+ * CHECKME:
+ * Shouldn't the delay mapping be enabled here when the
+ * block lies below i_size_aligned?
+ * - Since the area is always 0-filled, this causes no
+ * problem on FAT.
+ * If a hole in the middle has been filled, it cannot be
+ * invalidated without first being written out to disk.
+ */
+ goto unlock_ret;
+ }
+
+
+ /* Newly added blocks */
+ if (BLOCK_ADDED(bmap_create)) {
+ set_buffer_new(bh_result);
+
+ SDFAT_I(inode)->i_size_aligned += max_blocks << sb->s_blocksize_bits;
+ if (phys) {
+ /* i_size_ondisk changes if a block added in the existing cluster */
+ #define num_clusters(value) ((value) ? (s32)((value - 1) >> fsi->cluster_size_bits) + 1 : 0)
+
+ /* FOR GRACEFUL ERROR HANDLING */
+ if (num_clusters(SDFAT_I(inode)->i_size_aligned) !=
+ num_clusters(SDFAT_I(inode)->i_size_ondisk)) {
+ EMSG("%s: inode(%p) invalid size (create(%d) "
+ "bmap_create(%d) phys(%lld) aligned(%lld) "
+ "on_disk(%lld) iblock(%u) sec_off(%d))\n",
+ __func__, inode, create, bmap_create, (s64)phys,
+ (s64)SDFAT_I(inode)->i_size_aligned,
+ (s64)SDFAT_I(inode)->i_size_ondisk,
+ (u32)iblock,
+ (s32)sec_offset);
+ sdfat_debug_bug_on(1);
+ }
+ SDFAT_I(inode)->i_size_ondisk = SDFAT_I(inode)->i_size_aligned;
+ }
+
+ pos = (iblock + 1) << sb->s_blocksize_bits;
+ /* Debug purpose - defensive coding */
+ ASSERT(SDFAT_I(inode)->i_size_aligned == pos);
+ if (SDFAT_I(inode)->i_size_aligned < pos)
+ SDFAT_I(inode)->i_size_aligned = pos;
+ /* Debug end */
+
+#ifdef CONFIG_SDFAT_TRACE_IO
+ /* New page added (ASSERTION: 8 blocks per page) */
+ if ((sec_offset & 7) == 0)
+ sbi->stat_n_pages_added++;
+#endif
+ }
+
+ /* FOR GRACEFUL ERROR HANDLING */
+ if (i_size_read(inode) > SDFAT_I(inode)->i_size_aligned) {
+ sdfat_fs_error_ratelimit(sb, "%s: invalid size (inode(%p), "
+ "size(%llu) > aligned(%llu)\n", __func__, inode,
+ i_size_read(inode), SDFAT_I(inode)->i_size_aligned);
+ sdfat_debug_bug_on(1);
+ }
+
+ bh_result->b_size = max_blocks << sb->s_blocksize_bits;
+
+unlock_ret:
+ __unlock_super(sb);
+ return err;
+}
+
+/*
+ * Standard get_block callback: translates @iblock to an on-disk block,
+ * allocating a cluster when @create is set. Also resolves BH_Delay
+ * mappings left by the delayed-allocation path and keeps i_size_ondisk
+ * / i_size_aligned consistent as blocks materialize.
+ */
+static int sdfat_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ struct super_block *sb = inode->i_sb;
+ unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
+ int err = 0;
+ unsigned long mapped_blocks;
+ sector_t phys;
+ loff_t pos;
+ int bmap_create = create ? BMAP_ADD_CLUSTER : BMAP_NOT_CREATE;
+
+ __lock_super(sb);
+ err = sdfat_bmap(inode, iblock, &phys, &mapped_blocks, &bmap_create);
+ if (err) {
+ if (err != -ENOSPC)
+ sdfat_fs_error_ratelimit(sb, "%s: failed to bmap "
+ "(inode:%p iblock:%u, err:%d)",
+ __func__, inode, (u32)iblock, err);
+ goto unlock_ret;
+ }
+
+ if (phys) {
+ max_blocks = min(mapped_blocks, max_blocks);
+
+ /* Treat newly added block / cluster */
+ if (BLOCK_ADDED(bmap_create) || buffer_delay(bh_result)) {
+
+ /* Update i_size_ondisk */
+ pos = (iblock + 1) << sb->s_blocksize_bits;
+ if (SDFAT_I(inode)->i_size_ondisk < pos) {
+ /* Debug purpose */
+ if ((pos - SDFAT_I(inode)->i_size_ondisk) > bh_result->b_size) {
+ /* This never happens without DA */
+ MMSG("Jumping get_block\n");
+ }
+
+ SDFAT_I(inode)->i_size_ondisk = pos;
+ sdfat_debug_check_clusters(inode);
+ }
+
+ if (BLOCK_ADDED(bmap_create)) {
+ /* Old way (w/o DA)
+ * create == 1 only if iblock > i_size
+ * (in block unit)
+ */
+
+ /* 20130723 CHECK
+ * If this races with a truncate, buffer_delay()
+ * may be set while i_size < (position of
+ * i_block).
+ *
+ * Since we only rewrite an already-allocated
+ * area this is not a big problem, but in that
+ * case the area must be one for which
+ * i_size_aligned was extended in advance.
+ */
+
+ /* FOR GRACEFUL ERROR HANDLING */
+ if (buffer_delay(bh_result) &&
+ (pos > SDFAT_I(inode)->i_size_aligned)) {
+ sdfat_fs_error(sb, "requested for bmap "
+ "out of range(pos:(%llu)>i_size_aligned(%llu)\n",
+ pos, SDFAT_I(inode)->i_size_aligned);
+ sdfat_debug_bug_on(1);
+ err = -EIO;
+ goto unlock_ret;
+ }
+ set_buffer_new(bh_result);
+
+ /*
+ * adjust i_size_aligned if i_size_ondisk is
+ * bigger than it. (i.e. non-DA)
+ */
+ if (SDFAT_I(inode)->i_size_ondisk >
+ SDFAT_I(inode)->i_size_aligned) {
+ SDFAT_I(inode)->i_size_aligned =
+ SDFAT_I(inode)->i_size_ondisk;
+ }
+ }
+
+ if (buffer_delay(bh_result))
+ clear_buffer_delay(bh_result);
+
+#if 0
+ /* Debug purpose */
+ if (SDFAT_I(inode)->i_size_ondisk >
+ SDFAT_I(inode)->i_size_aligned) {
+ /* Only after truncate
+ * and the two size variables should indicate
+ * same i_block
+ */
+ unsigned int blocksize = 1 << inode->i_blkbits;
+ BUG_ON(SDFAT_I(inode)->i_size_ondisk -
+ SDFAT_I(inode)->i_size_aligned >= blocksize);
+ }
+#endif
+ }
+ map_bh(bh_result, sb, phys);
+ }
+
+ bh_result->b_size = max_blocks << sb->s_blocksize_bits;
+unlock_ret:
+ __unlock_super(sb);
+ return err;
+}
+
+/* ->readpage: read one page via the generic mpage machinery */
+static int sdfat_readpage(struct file *file, struct page *page)
+{
+ return mpage_readpage(page, sdfat_get_block);
+}
+
+/* ->readpages: batched readahead via the generic mpage machinery */
+static int sdfat_readpages(struct file *file, struct address_space *mapping,
+ struct list_head *pages, unsigned int nr_pages)
+{
+ return mpage_readpages(mapping, pages, nr_pages, sdfat_get_block);
+}
+
+/*
+ * Build and submit a single-page write bio for @length bytes of @page
+ * starting at device @sector. The single bio_vec is filled in-line;
+ * completion is handled by sdfat_writepage_end_io.
+ */
+static inline void sdfat_submit_fullpage_bio(struct block_device *bdev,
+ sector_t sector, unsigned int length,
+ struct page *page, struct writeback_control *wbc)
+{
+ /* Single page bio submit */
+ struct bio *bio;
+
+ BUG_ON((length > PAGE_SIZE) || (length == 0));
+
+ /*
+ * If __GFP_WAIT is set, then bio_alloc will always be able to allocate
+ * a bio. This is due to the mempool guarantees. To make this work, callers
+ * must never allocate more than 1 bio at a time from this pool.
+ *
+ * #define GFP_NOIO (__GFP_WAIT)
+ */
+ bio = bio_alloc(GFP_NOIO, 1);
+
+ bio_set_dev(bio, bdev);
+ bio->bi_vcnt = 1;
+ bio->bi_io_vec[0].bv_page = page; /* Inline vec */
+ bio->bi_io_vec[0].bv_len = length; /* PAGE_SIZE */
+ bio->bi_io_vec[0].bv_offset = 0;
+ __sdfat_set_bio_iterate(bio, sector, length, 0, 0);
+
+ bio->bi_end_io = sdfat_writepage_end_io;
+ __sdfat_submit_bio_write(bio, wbc);
+}
+
+/*
+ * ->writepage: fast path submits the page's contiguous dirty blocks as
+ * one bio; anything irregular (partial page, non-contiguous mapping,
+ * missing buffers, races with truncate) falls back to
+ * block_write_full_page() via the "confused" label.
+ */
+static int sdfat_writepage(struct page *page, struct writeback_control *wbc)
+{
+ struct inode * const inode = page->mapping->host;
+ struct super_block *sb = inode->i_sb;
+ loff_t i_size = i_size_read(inode);
+ const pgoff_t end_index = i_size >> PAGE_SHIFT;
+ const unsigned int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ struct buffer_head *bh, *head;
+ sector_t block, block_0, last_phys;
+ int ret;
+ unsigned int nr_blocks_towrite = blocks_per_page;
+
+ /* Don't distinguish 0-filled/clean block.
+ * Just write back the whole page
+ */
+ if (fsi->cluster_size < PAGE_SIZE)
+ goto confused;
+
+ if (!PageUptodate(page)) {
+ MMSG("%s: Not up-to-date page -> block_write_full_page\n",
+ __func__);
+ goto confused;
+ }
+
+ if (page->index >= end_index) {
+ /* last page or outside i_size */
+ unsigned int offset = i_size & (PAGE_SIZE-1);
+
+ /* If a truncation is in progress */
+ if (page->index > end_index || !offset)
+ goto confused;
+
+ /* 0-fill after i_size */
+ zero_user_segment(page, offset, PAGE_SIZE);
+ }
+
+ if (!page_has_buffers(page)) {
+ MMSG("WP: No buffers -> block_write_full_page\n");
+ goto confused;
+ }
+
+ block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
+ block_0 = block; /* first block */
+ head = page_buffers(page);
+ bh = head;
+
+ last_phys = 0;
+ do {
+ BUG_ON(buffer_locked(bh));
+
+ if (!buffer_dirty(bh) || !buffer_uptodate(bh)) {
+ if (nr_blocks_towrite == blocks_per_page)
+ nr_blocks_towrite = (unsigned int) (block - block_0);
+
+ BUG_ON(nr_blocks_towrite >= blocks_per_page);
+
+ // !uptodate but dirty??
+ if (buffer_dirty(bh))
+ goto confused;
+
+ // Nothing to writeback in this block
+ bh = bh->b_this_page;
+ block++;
+ continue;
+ }
+
+ if (nr_blocks_towrite != blocks_per_page)
+ // Dirty -> Non-dirty -> Dirty again case
+ goto confused;
+
+ /* Map if needed */
+ if (!buffer_mapped(bh) || buffer_delay(bh)) {
+ BUG_ON(bh->b_size != (1 << (inode->i_blkbits)));
+ ret = sdfat_get_block(inode, block, bh, 1);
+ if (ret)
+ goto confused;
+
+ if (buffer_new(bh)) {
+ clear_buffer_new(bh);
+ __sdfat_clean_bdev_aliases(bh->b_bdev, bh->b_blocknr);
+ }
+ }
+
+ /* continuity check */
+ if (((last_phys + 1) != bh->b_blocknr) && (last_phys != 0)) {
+ DMSG("Non-contiguous block mapping in single page");
+ goto confused;
+ }
+
+ last_phys = bh->b_blocknr;
+ bh = bh->b_this_page;
+ block++;
+ } while (bh != head);
+
+ if (nr_blocks_towrite == 0) {
+ DMSG("Page dirty but no dirty bh? alloc_208\n");
+ goto confused;
+ }
+
+
+ /* Write-back */
+ do {
+ clear_buffer_dirty(bh);
+ bh = bh->b_this_page;
+ } while (bh != head);
+
+ BUG_ON(PageWriteback(page));
+ set_page_writeback(page);
+
+ /**
+ * Turn off MAPPED flag in victim's bh if defrag on.
+ * Another write_begin can starts after get_block for defrag victims called.
+ * In this case, write_begin calls get_block and get original block number
+ * and previous defrag will be canceled.
+ */
+ /* FIX: cast page->index to loff_t BEFORE shifting; shifting the
+ * unsigned long index first truncates offsets >= 4 GiB on 32-bit
+ * (compare the correct cast used for 'block' above).
+ */
+ if (unlikely(__check_dfr_on(inode,
+ ((loff_t)page->index << PAGE_SHIFT),
+ ((loff_t)(page->index + 1) << PAGE_SHIFT),
+ __func__))) {
+ do {
+ clear_buffer_mapped(bh);
+ bh = bh->b_this_page;
+ } while (bh != head);
+ }
+
+ // Trace # of pages queued (Approx.)
+ atomic_inc(&SDFAT_SB(sb)->stat_n_pages_queued);
+
+ sdfat_submit_fullpage_bio(head->b_bdev,
+ head->b_blocknr << (sb->s_blocksize_bits - SECTOR_SIZE_BITS),
+ nr_blocks_towrite << inode->i_blkbits,
+ page, wbc);
+
+ unlock_page(page);
+
+ return 0;
+
+confused:
+#ifdef CONFIG_SDFAT_TRACE_IO
+ SDFAT_SB(sb)->stat_n_pages_confused++;
+#endif
+ ret = block_write_full_page(page, sdfat_get_block, wbc);
+ return ret;
+}
+
+/* ->writepages for the delayed-allocation aops; optionally routes
+ * through the request-adjusting mpage path when "adj_req" is mounted.
+ */
+static int sdfat_da_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ MMSG("%s(inode:%p) with nr_to_write = 0x%08lx "
+ "(ku %d, bg %d, tag %d, rc %d )\n",
+ __func__, mapping->host, wbc->nr_to_write,
+ wbc->for_kupdate, wbc->for_background, wbc->tagged_writepages,
+ wbc->for_reclaim);
+
+ ASSERT(mapping->a_ops == &sdfat_da_aops);
+
+#ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
+ if (SDFAT_SB(mapping->host->i_sb)->options.adj_req)
+ return sdfat_mpage_writepages(mapping, wbc, sdfat_get_block);
+#endif
+ return generic_writepages(mapping, wbc);
+}
+
+/* ->writepages for the normal (non-DA) aops; same adj_req detour,
+ * otherwise the stock mpage writeback path.
+ */
+static int sdfat_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ MMSG("%s(inode:%p) with nr_to_write = 0x%08lx "
+ "(ku %d, bg %d, tag %d, rc %d )\n",
+ __func__, mapping->host, wbc->nr_to_write,
+ wbc->for_kupdate, wbc->for_background, wbc->tagged_writepages,
+ wbc->for_reclaim);
+
+ ASSERT(mapping->a_ops == &sdfat_aops);
+
+#ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
+ if (SDFAT_SB(mapping->host->i_sb)->options.adj_req)
+ return sdfat_mpage_writepages(mapping, wbc, sdfat_get_block);
+#endif
+ return mpage_writepages(mapping, wbc, sdfat_get_block);
+}
+
+/*
+ * Undo a failed or short write that extended past i_size: drop the
+ * page cache beyond i_size, then trim blocks allocated past the
+ * aligned size.
+ */
+static void sdfat_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to <= i_size_read(inode))
+ return;
+
+ __sdfat_truncate_pagecache(inode, to, i_size_read(inode));
+ sdfat_truncate(inode, SDFAT_I(inode)->i_size_aligned);
+}
+
+/* Return 0 when the volume accepts writes, else -EIO (dead bdi) or
+ * -EROFS (read-only mount).
+ */
+static int sdfat_check_writable(struct super_block *sb)
+{
+ int err = 0;
+
+ if (fsapi_check_bdi_valid(sb))
+ err = -EIO;
+ else if (sb_rdonly(sb))
+ err = -EROFS;
+
+ return err;
+}
+
+/*
+ * Common ->write_begin helper for both the normal and delayed-alloc
+ * paths. Cancels defrag work overlapping the write range, bails out on
+ * invalid/read-only volumes, then lets cont_write_begin() zero-fill up
+ * to @pos using @get_block; @bytes tracks the zeroed-out frontier.
+ */
+static int __sdfat_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len,
+ unsigned int flags, struct page **pagep,
+ void **fsdata, get_block_t *get_block,
+ loff_t *bytes, const char *fname)
+{
+ struct super_block *sb = mapping->host->i_sb;
+ int err;
+
+ __cancel_dfr_work(mapping->host, pos, (loff_t)(pos + len), fname);
+
+ err = sdfat_check_writable(sb);
+ if (unlikely(err < 0))
+ return err;
+
+ *pagep = NULL;
+ err = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ get_block, bytes);
+ if (err < 0)
+ sdfat_write_failed(mapping, pos + len);
+
+ return err;
+}
+
+
+/* ->write_begin for delayed allocation: blocks are only reserved and
+ * i_size_aligned is the zero-fill frontier.
+ */
+static int sdfat_da_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int flags,
+ struct page **pagep, void **fsdata)
+{
+ struct inode *host = mapping->host;
+
+ return __sdfat_write_begin(file, mapping, pos, len, flags,
+ pagep, fsdata, sdfat_da_prep_block,
+ &SDFAT_I(host)->i_size_aligned,
+ __func__);
+}
+
+
+/* ->write_begin for immediate allocation: clusters are mapped eagerly
+ * and i_size_ondisk is the zero-fill frontier.
+ */
+static int sdfat_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int flags,
+ struct page **pagep, void **fsdata)
+{
+ struct inode *host = mapping->host;
+
+ return __sdfat_write_begin(file, mapping, pos, len, flags,
+ pagep, fsdata, sdfat_get_block,
+ &SDFAT_I(host)->i_size_ondisk,
+ __func__);
+}
+
+/*
+ * ->write_end: finish via generic_write_end(), sanity-check the
+ * i_size <= i_size_aligned invariant, roll back on a short copy, and
+ * set the ARCHIVE attribute + timestamps on the first modification.
+ */
+static int sdfat_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int copied,
+ struct page *pagep, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
+ int err;
+
+ err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
+
+ /* FOR GRACEFUL ERROR HANDLING */
+ if (SDFAT_I(inode)->i_size_aligned < i_size_read(inode)) {
+ sdfat_fs_error(inode->i_sb, "invalid size(size(%llu) "
+ "> aligned(%llu)\n", i_size_read(inode),
+ SDFAT_I(inode)->i_size_aligned);
+ sdfat_debug_bug_on(1);
+ }
+
+ /* generic_write_end() returns bytes copied; < len means short write */
+ if (err < len)
+ sdfat_write_failed(mapping, pos+len);
+
+ if (!(err < 0) && !(fid->attr & ATTR_ARCHIVE)) {
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ fid->attr |= ATTR_ARCHIVE;
+ mark_inode_dirty(inode);
+ }
+
+ return err;
+}
+
+/*
+ * Common direct-I/O worker. A write that would extend past
+ * i_size_aligned returns 0 so the caller falls back to buffered I/O
+ * (blockdev_direct_IO bypasses ->write_begin and cannot grow the
+ * aligned size). Failed extending writes are rolled back.
+ */
+static inline ssize_t __sdfat_direct_IO(int rw, struct kiocb *iocb,
+ struct inode *inode, void *iov_u, loff_t offset,
+ loff_t count, unsigned long nr_segs)
+{
+ struct address_space *mapping = inode->i_mapping;
+ loff_t size = offset + count;
+ ssize_t ret;
+
+ if (rw == WRITE) {
+ /*
+ * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
+ * so we need to update the ->i_size_aligned to block boundary.
+ *
+ * But we must fill the remaining area or hole by nul for
+ * updating ->i_size_aligned
+ *
+ * Return 0, and fallback to normal buffered write.
+ */
+ if (SDFAT_I(inode)->i_size_aligned < size)
+ return 0;
+ }
+
+ /*
+ * sdFAT need to use the DIO_LOCKING for avoiding the race
+ * condition of sdfat_get_block() and ->truncate().
+ */
+ ret = __sdfat_blkdev_direct_IO(rw, iocb, inode, iov_u, offset, nr_segs);
+ if (ret < 0 && (rw & WRITE))
+ sdfat_write_failed(mapping, size);
+
+ return ret;
+}
+
+/* address_space_operations for eager (non-delayed) allocation */
+static const struct address_space_operations sdfat_aops = {
+ .readpage = sdfat_readpage,
+ .readpages = sdfat_readpages,
+ .writepage = sdfat_writepage,
+ .writepages = sdfat_writepages,
+ .write_begin = sdfat_write_begin,
+ .write_end = sdfat_write_end,
+ .direct_IO = sdfat_direct_IO,
+ .bmap = sdfat_aop_bmap
+};
+
+/* address_space_operations for delayed allocation (mount option) */
+static const struct address_space_operations sdfat_da_aops = {
+ .readpage = sdfat_readpage,
+ .readpages = sdfat_readpages,
+ .writepage = sdfat_writepage,
+ .writepages = sdfat_da_writepages,
+ .write_begin = sdfat_da_write_begin,
+ .write_end = sdfat_write_end,
+ .direct_IO = sdfat_direct_IO,
+ .bmap = sdfat_aop_bmap
+};
+
+/*======================================================================*/
+/* Super Operations */
+/*======================================================================*/
+
+/* Hash a directory-entry position into an inode-hash bucket index.
+ * NOTE(review): hash_32() keeps only the low 32 bits of i_pos;
+ * collisions are resolved by the per-bucket hlist.
+ */
+static inline unsigned long sdfat_hash(loff_t i_pos)
+{
+ return hash_32(i_pos, SDFAT_HASH_BITS);
+}
+
+/* Record @i_pos on the inode and insert it into the per-sb i_pos hash
+ * so sdfat_iget() can find it later.
+ */
+static void sdfat_attach(struct inode *inode, loff_t i_pos)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(inode->i_sb);
+ struct sdfat_inode_info *info = SDFAT_I(inode);
+ struct hlist_head *bucket = sbi->inode_hashtable + sdfat_hash(i_pos);
+
+ spin_lock(&sbi->inode_hash_lock);
+ info->i_pos = i_pos;
+ hlist_add_head(&info->i_hash_fat, bucket);
+ spin_unlock(&sbi->inode_hash_lock);
+}
+
+/* Remove the inode from the i_pos hash and reset its position. */
+static void sdfat_detach(struct inode *inode)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(inode->i_sb);
+ struct sdfat_inode_info *info = SDFAT_I(inode);
+
+ spin_lock(&sbi->inode_hash_lock);
+ hlist_del_init(&info->i_hash_fat);
+ info->i_pos = 0;
+ spin_unlock(&sbi->inode_hash_lock);
+}
+
+
+/* doesn't deal with root inode */
+/*
+ * Populate a freshly created VFS inode from @fid: reads the directory
+ * entry, selects inode/file/aops tables by entry type, and initializes
+ * the i_size_ondisk/i_size_aligned pair (block-aligned). Returns 0 or
+ * -EIO when the on-disk entry cannot be read.
+ */
+static int sdfat_fill_inode(struct inode *inode, const FILE_ID_T *fid)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(inode->i_sb);
+ FS_INFO_T *fsi = &(sbi->fsi);
+ DIR_ENTRY_T info;
+ u64 size = fid->size;
+
+ memcpy(&(SDFAT_I(inode)->fid), fid, sizeof(FILE_ID_T));
+
+ SDFAT_I(inode)->i_pos = 0;
+ SDFAT_I(inode)->target = NULL;
+ inode->i_uid = sbi->options.fs_uid;
+ inode->i_gid = sbi->options.fs_gid;
+ inode_inc_iversion(inode);
+ inode->i_generation = get_seconds();
+
+ if (fsapi_read_inode(inode, &info) < 0) {
+ MMSG("%s: failed to read stat!\n", __func__);
+ return -EIO;
+ }
+
+ /* generation LSB: 0 for directories, 1 for files/symlinks */
+ if (info.Attr & ATTR_SUBDIR) { /* directory */
+ inode->i_generation &= ~1;
+ inode->i_mode = sdfat_make_mode(sbi, info.Attr, S_IRWXUGO);
+ inode->i_op = &sdfat_dir_inode_operations;
+ inode->i_fop = &sdfat_dir_operations;
+
+ set_nlink(inode, info.NumSubdirs);
+ } else if (info.Attr & ATTR_SYMLINK) { /* symbolic link */
+ inode->i_op = &sdfat_symlink_inode_operations;
+ inode->i_generation |= 1;
+ inode->i_mode = sdfat_make_mode(sbi, info.Attr, S_IRWXUGO);
+ } else { /* regular file */
+ inode->i_generation |= 1;
+ inode->i_mode = sdfat_make_mode(sbi, info.Attr, S_IRWXUGO);
+ inode->i_op = &sdfat_file_inode_operations;
+ inode->i_fop = &sdfat_file_operations;
+
+ if (sbi->options.improved_allocation & SDFAT_ALLOC_DELAY)
+ inode->i_mapping->a_ops = &sdfat_da_aops;
+ else
+ inode->i_mapping->a_ops = &sdfat_aops;
+
+ inode->i_mapping->nrpages = 0;
+
+ }
+
+ /*
+ * Use fid->size instead of info.Size
+ * because info.Size means the value saved on disk
+ */
+ i_size_write(inode, size);
+
+ /* ondisk and aligned size should be aligned with block size */
+ if (size & (inode->i_sb->s_blocksize - 1)) {
+ size |= (inode->i_sb->s_blocksize - 1);
+ size++;
+ }
+
+ SDFAT_I(inode)->i_size_aligned = size;
+ SDFAT_I(inode)->i_size_ondisk = size;
+ sdfat_debug_check_clusters(inode);
+
+ sdfat_save_attr(inode, info.Attr);
+
+ /* i_blocks counts cluster-rounded size in 512-byte-ish block units */
+ inode->i_blocks = ((i_size_read(inode) + (fsi->cluster_size - 1))
+ & ~((loff_t)fsi->cluster_size - 1)) >> inode->i_blkbits;
+
+ sdfat_time_fat2unix(sbi, &inode->i_mtime, &info.ModifyTimestamp);
+ sdfat_time_fat2unix(sbi, &inode->i_ctime, &info.CreateTimestamp);
+ sdfat_time_fat2unix(sbi, &inode->i_atime, &info.AccessTimestamp);
+
+ __init_dfr_info(inode);
+
+ return 0;
+}
+
+/*
+ * Get-or-create an inode for the entry at @i_pos: reuse a cached inode
+ * from the i_pos hash when present, otherwise allocate, fill from
+ * @fid, and hash it. Returns an inode or an ERR_PTR.
+ */
+static struct inode *sdfat_build_inode(struct super_block *sb,
+ const FILE_ID_T *fid, loff_t i_pos) {
+ struct inode *inode;
+ int err;
+
+ inode = sdfat_iget(sb, i_pos);
+ if (inode)
+ goto out;
+ inode = new_inode(sb);
+ if (!inode) {
+ inode = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ /* synthetic inode number; i_pos is the real on-disk identity */
+ inode->i_ino = iunique(sb, SDFAT_ROOT_INO);
+ inode_set_iversion(inode, 1);
+ err = sdfat_fill_inode(inode, fid);
+ if (err) {
+ iput(inode);
+ inode = ERR_PTR(err);
+ goto out;
+ }
+ sdfat_attach(inode, i_pos);
+ insert_inode_hash(inode);
+out:
+ return inode;
+}
+
+/* ->alloc_inode: carve an sdfat_inode_info from the slab cache and
+ * hand the embedded VFS inode back; truncate_lock guards the
+ * truncate-vs-mmap race on kernels that have it.
+ */
+static struct inode *sdfat_alloc_inode(struct super_block *sb)
+{
+ struct sdfat_inode_info *ei;
+
+ ei = kmem_cache_alloc(sdfat_inode_cachep, GFP_NOFS);
+ if (!ei)
+ return NULL;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+ init_rwsem(&ei->truncate_lock);
+#endif
+ return &ei->vfs_inode;
+}
+
+/* Release the symlink target buffer (if any) and return the
+ * sdfat_inode_info to its slab cache.
+ */
+static void sdfat_free_inode(struct inode *inode)
+{
+ /* kfree(NULL) is a no-op, so the NULL guard was redundant */
+ kfree(SDFAT_I(inode)->target);
+ SDFAT_I(inode)->target = NULL;
+
+ kmem_cache_free(sdfat_inode_cachep, SDFAT_I(inode));
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+/* Use free_inode instead of destroy_inode */
+#define sdfat_destroy_inode (NULL)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+/* 3.13+: defer the actual free past an RCU grace period, since RCU
+ * path walkers may still dereference the inode.
+ */
+static void sdfat_i_callback(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+
+ sdfat_free_inode(inode);
+}
+
+static void sdfat_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, sdfat_i_callback);
+}
+#else
+/* pre-3.13: free synchronously */
+static void sdfat_destroy_inode(struct inode *inode)
+{
+ sdfat_free_inode(inode);
+}
+#endif
+
+/*
+ * Write the inode's attributes, size and timestamps back to its
+ * directory entry. @sync forces a synchronous update but is ignored
+ * when the volume doesn't support it. Root has no entry to update.
+ */
+static int __sdfat_write_inode(struct inode *inode, int sync)
+{
+ struct super_block *sb = inode->i_sb;
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ DIR_ENTRY_T info;
+
+ if (inode->i_ino == SDFAT_ROOT_INO)
+ return 0;
+
+ info.Attr = sdfat_make_attr(inode);
+ info.Size = i_size_read(inode);
+
+ sdfat_time_unix2fat(sbi, &inode->i_mtime, &info.ModifyTimestamp);
+ sdfat_time_unix2fat(sbi, &inode->i_ctime, &info.CreateTimestamp);
+ sdfat_time_unix2fat(sbi, &inode->i_atime, &info.AccessTimestamp);
+
+ if (!__support_write_inode_sync(sb))
+ sync = 0;
+
+ /* FIXME : Do we need handling error? */
+ return fsapi_write_inode(inode, &info, sync);
+}
+
+/* Synchronous variant of inode writeback. */
+static int sdfat_sync_inode(struct inode *inode)
+{
+ return __sdfat_write_inode(inode, 1);
+}
+
+/* ->write_inode: sync only when the writeback control demands it. */
+static int sdfat_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ return __sdfat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+}
+
+/*
+ * ->evict_inode: drop page cache; for unlinked inodes also cancel any
+ * defrag work and truncate the cluster chain to zero, then detach from
+ * the i_pos hash.
+ */
+static void sdfat_evict_inode(struct inode *inode)
+{
+ truncate_inode_pages_final(&inode->i_data);
+
+ if (!inode->i_nlink) {
+ loff_t old_size = i_size_read(inode);
+
+ i_size_write(inode, 0);
+
+ /* fid.size keeps the pre-truncate size for fsapi_truncate() */
+ SDFAT_I(inode)->fid.size = old_size;
+
+ __cancel_dfr_work(inode, 0, (loff_t)old_size, __func__);
+
+ /* TO CHECK evicting directory works correctly */
+ MMSG("%s: inode(%p) evict %s (size(%llu) to zero)\n",
+ __func__, inode,
+ S_ISDIR(inode->i_mode) ? "directory" : "file",
+ (u64)old_size);
+ fsapi_truncate(inode, old_size, 0);
+ }
+
+ invalidate_inode_buffers(inode);
+ clear_inode(inode);
+ fsapi_invalidate_extent(inode);
+ sdfat_detach(inode);
+
+ /* after end of this function, caller will remove inode hash */
+ /* remove_inode_hash(inode); */
+}
+
+/*
+ * Release everything hanging off the sb-info — loaded NLS tables, a
+ * non-default iocharset string — and finally the sb-info itself, which
+ * may live in kmalloc or vmalloc memory depending on how it was made.
+ */
+static void sdfat_free_sb_info(struct sdfat_sb_info *sbi)
+{
+ if (sbi->nls_disk) {
+ unload_nls(sbi->nls_disk);
+ sbi->nls_disk = NULL;
+ sbi->options.codepage = sdfat_default_codepage;
+ }
+
+ if (sbi->nls_io) {
+ unload_nls(sbi->nls_io);
+ sbi->nls_io = NULL;
+ }
+
+ /* the default iocharset is a static string — never freed */
+ if (sbi->options.iocharset != sdfat_default_iocharset) {
+ kfree(sbi->options.iocharset);
+ sbi->options.iocharset = sdfat_default_iocharset;
+ }
+
+ if (sbi->use_vmalloc)
+ vfree(sbi);
+ else
+ kfree(sbi);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+/* 3.13+: free the sb-info after an RCU grace period so lock-free
+ * readers cannot see it disappear underneath them.
+ */
+static void delayed_free(struct rcu_head *p)
+{
+ struct sdfat_sb_info *sbi = container_of(p, struct sdfat_sb_info, rcu);
+
+ sdfat_free_sb_info(sbi);
+}
+
+static void __sdfat_destroy_sb_info(struct super_block *sb)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ call_rcu(&sbi->rcu, delayed_free);
+}
+#else
+/* pre-3.13: free synchronously and clear the sb back-pointer */
+static void __sdfat_destroy_sb_info(struct super_block *sb)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ sdfat_free_sb_info(sbi);
+ sb->s_fs_info = NULL;
+}
+#endif
+
+/* Tear down the per-sb sysfs kobject, then free the sb-info. */
+static void sdfat_destroy_sb_info(struct super_block *sb)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ kobject_del(&sbi->sb_kobj);
+ kobject_put(&sbi->sb_kobj);
+
+ __sdfat_destroy_sb_info(sb);
+}
+
+/*
+ * ->put_super: stop delayed work, flush any pending dirty metadata,
+ * release defrag memory, unmount the core FS layer, and free the
+ * sb-info.
+ */
+static void sdfat_put_super(struct super_block *sb)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ int err;
+
+ sdfat_log_msg(sb, KERN_INFO, "trying to unmount...");
+
+ __cancel_delayed_work_sync(sbi);
+
+ if (__is_sb_dirty(sb))
+ sdfat_write_super(sb);
+
+ __free_dfr_mem_if_required(sb);
+ err = fsapi_umount(sb);
+
+ sdfat_destroy_sb_info(sb);
+
+ sdfat_log_msg(sb, KERN_INFO, "unmounted successfully! %s",
+ err ? "(with previous I/O errors)" : "");
+}
+
+/* Flush delayed FAT/directory metadata; compiles to nothing when the
+ * delayed-meta-dirty feature is configured out.
+ */
+static inline void __flush_delayed_meta(struct super_block *sb, s32 sync)
+{
+#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
+ fsapi_cache_flush(sb, sync);
+#else
+ /* DO NOTHING */
+#endif
+}
+
+/*
+ * Lazy superblock writeback: clear the dirty flag, push delayed
+ * FAT/directory metadata, sync the FS when writable, then force the
+ * block device queue so directory updates actually reach the media.
+ */
+static void sdfat_write_super(struct super_block *sb)
+{
+ /* FIX: jiffies is unsigned long; snapshotting it into an int
+ * truncates once jiffies exceeds INT_MAX and makes the elapsed
+ * time below wrap (debug message only, but misleading).
+ */
+ unsigned long time = 0;
+
+ __lock_super(sb);
+
+ __set_sb_clean(sb);
+
+#ifdef CONFIG_SDFAT_DFR
+ if (atomic_read(&(SDFAT_SB(sb)->dfr_info.stat)) == DFR_SB_STAT_VALID)
+ fsapi_dfr_update_fat_next(sb);
+#endif
+
+ /* flush delayed FAT/DIR dirty */
+ __flush_delayed_meta(sb, 0);
+
+ if (!sb_rdonly(sb))
+ fsapi_sync_fs(sb, 0);
+
+ __unlock_super(sb);
+
+ time = jiffies;
+
+ /* Issuing bdev requests is needed
+ * to guarantee DIR updates in time
+ * whether w/ or w/o delayed DIR dirty feature.
+ * (otherwise DIR updates could be delayed for 5 + 5 secs at max.)
+ */
+ sync_blockdev(sb->s_bdev);
+
+#if (defined(CONFIG_SDFAT_DFR) && defined(CONFIG_SDFAT_DFR_DEBUG))
+ /* SPO test */
+ fsapi_dfr_spo_test(sb, DFR_SPO_FAT_NEXT, __func__);
+#endif
+ MMSG("BD: sdfat_write_super (bdev_sync for %lu ms)\n",
+ (jiffies - time) * 1000 / HZ);
+}
+
+
+/* Advance the defrag FAT-next state if defrag is mounted and its
+ * state block is valid; no-op when CONFIG_SDFAT_DFR is off.
+ */
+static void __dfr_update_fat_next(struct super_block *sb)
+{
+#ifdef CONFIG_SDFAT_DFR
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+ if (sbi->options.defrag &&
+ (atomic_read(&sbi->dfr_info.stat) == DFR_SB_STAT_VALID)) {
+ fsapi_dfr_update_fat_next(sb);
+ }
+#endif
+}
+
+/*
+ * Complete a pending defrag request (VALID -> REQ, waking waiters) or,
+ * on a timeout, poll whether defrag is needed and notify the userspace
+ * daemon via a kobject uevent. Only runs on a wait-mode sync with the
+ * defrag mount option; no-op when CONFIG_SDFAT_DFR is off.
+ */
+static void __dfr_update_fat_prev(struct super_block *sb, int wait)
+{
+#ifdef CONFIG_SDFAT_DFR
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ struct defrag_info *sb_dfr = &sbi->dfr_info;
+ /* static time available? */
+ static int time; /* initialized by zero */
+ int uevent = 0, total = 0, clean = 0, full = 0;
+ int spent = jiffies - time;
+
+ if (!(sbi->options.defrag && wait))
+ return;
+
+ __lock_super(sb);
+ /* Update FAT for defrag */
+ if (atomic_read(&(sbi->dfr_info.stat)) == DFR_SB_STAT_VALID) {
+
+ fsapi_dfr_update_fat_prev(sb, 0);
+
+ /* flush delayed FAT/DIR dirty */
+ __flush_delayed_meta(sb, 0);
+
+ /* Complete defrag req */
+ fsapi_sync_fs(sb, 1);
+ atomic_set(&sb_dfr->stat, DFR_SB_STAT_REQ);
+ complete_all(&sbi->dfr_complete);
+ } else if (((spent < 0) || (spent > DFR_DEFAULT_TIMEOUT)) &&
+ (atomic_read(&(sbi->dfr_info.stat)) == DFR_SB_STAT_IDLE)) {
+ uevent = fsapi_dfr_check_dfr_required(sb, &total, &clean, &full);
+ time = jiffies;
+ }
+ __unlock_super(sb);
+
+ /* notify after dropping the lock to avoid holding it over uevent */
+ if (uevent) {
+ kobject_uevent(&SDFAT_SB(sb)->sb_kobj, KOBJ_CHANGE);
+ dfr_debug("uevent for defrag_daemon, total_au %d, "
+ "clean_au %d, full_au %d", total, clean, full);
+ }
+#endif
+}
+
+/*
+ * ->sync_fs: if the superblock is dirty, flush everything under the
+ * volume lock, then give the defrag engine a chance to complete its
+ * request when @wait is set.
+ */
+static int sdfat_sync_fs(struct super_block *sb, int wait)
+{
+ int err = 0;
+
+ /* If there are some dirty buffers in the bdev inode */
+ if (__is_sb_dirty(sb)) {
+ __lock_super(sb);
+ __set_sb_clean(sb);
+
+ __dfr_update_fat_next(sb);
+
+ err = fsapi_sync_fs(sb, 1);
+
+#if (defined(CONFIG_SDFAT_DFR) && defined(CONFIG_SDFAT_DFR_DEBUG))
+ /* SPO test */
+ fsapi_dfr_spo_test(sb, DFR_SPO_FAT_NEXT, __func__);
+#endif
+
+ __unlock_super(sb);
+ }
+
+ __dfr_update_fat_prev(sb, wait);
+
+ return err;
+}
+
+/*
+ * ->statfs: report volume statistics in cluster units (f_bsize is the
+ * cluster size, so f_blocks/f_bfree/f_bavail count clusters).
+ */
+static int sdfat_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ /*
+ * patch 1.2.2 :
+ * fixed the slow-call problem because of volume-lock contention.
+ */
+ struct super_block *sb = dentry->d_sb;
+ u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+ VOL_INFO_T info;
+
+ /* fsapi_statfs will try to get a volume lock if needed */
+ if (fsapi_statfs(sb, &info))
+ return -EIO;
+
+ if (fsi->prev_eio)
+ sdfat_msg(sb, KERN_INFO, "called statfs with previous"
+ " I/O error(0x%02X).", fsi->prev_eio);
+
+ buf->f_type = sb->s_magic;
+ buf->f_bsize = info.ClusterSize;
+ buf->f_blocks = info.NumClusters;
+ buf->f_bfree = info.FreeClusters;
+ buf->f_bavail = info.FreeClusters;
+ buf->f_fsid.val[0] = (u32)id;
+ buf->f_fsid.val[1] = (u32)(id >> 32);
+ /* Unicode utf8 255 characters */
+ buf->f_namelen = MAX_NAME_LENGTH * MAX_CHARSET_SIZE;
+
+ return 0;
+}
+
+/*
+ * ->remount_fs: force NODIRATIME, re-arm syncfs behaviour, mark the
+ * volume clean, and log the ro/rw transition. Mount options themselves
+ * are not reparsed here.
+ */
+static int sdfat_remount(struct super_block *sb, int *flags, char *data)
+{
+ unsigned long prev_sb_flags;
+ /* copy for logging only; kstrdup(NULL) yields NULL, printed "(null)" */
+ char *orig_data = kstrdup(data, GFP_KERNEL);
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ FS_INFO_T *fsi = &(sbi->fsi);
+
+ *flags |= SB_NODIRATIME;
+
+ prev_sb_flags = sb->s_flags;
+
+ sdfat_remount_syncfs(sb);
+
+ fsapi_set_vol_flags(sb, VOL_CLEAN, 1);
+
+ sdfat_log_msg(sb, KERN_INFO, "re-mounted(%s->%s), eio=0x%x, Opts: %s",
+ (prev_sb_flags & SB_RDONLY) ? "ro" : "rw",
+ (*flags & SB_RDONLY) ? "ro" : "rw",
+ fsi->prev_eio, orig_data);
+ kfree(orig_data);
+ return 0;
+}
+
+/* Emit the effective mount options (plus volume type and any recorded
+ * I/O-error flags) for /proc/mounts.
+ */
+static int __sdfat_show_options(struct seq_file *m, struct super_block *sb)
+{
+ struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+ struct sdfat_mount_options *opts = &sbi->options;
+ FS_INFO_T *fsi = &(sbi->fsi);
+
+ /* Show partition info */
+ seq_printf(m, ",fs=%s", sdfat_get_vol_type_str(fsi->vol_type));
+ if (fsi->prev_eio)
+ seq_printf(m, ",eio=0x%x", fsi->prev_eio);
+ if (!uid_eq(opts->fs_uid, GLOBAL_ROOT_UID))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, opts->fs_uid));
+ if (!gid_eq(opts->fs_gid, GLOBAL_ROOT_GID))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, opts->fs_gid));
+ seq_printf(m, ",fmask=%04o", opts->fs_fmask);
+ seq_printf(m, ",dmask=%04o", opts->fs_dmask);
+ if (opts->allow_utime)
+ seq_printf(m, ",allow_utime=%04o", opts->allow_utime);
+ if (sbi->nls_disk)
+ seq_printf(m, ",codepage=%s", sbi->nls_disk->charset);
+ if (sbi->nls_io)
+ seq_printf(m, ",iocharset=%s", sbi->nls_io->charset);
+ if (opts->utf8)
+ seq_puts(m, ",utf8");
+ if (sbi->fsi.vol_type != EXFAT)
+ seq_puts(m, ",shortname=winnt");
+ seq_printf(m, ",namecase=%u", opts->casesensitive);
+ if (opts->tz_utc)
+ seq_puts(m, ",tz=UTC");
+ if (opts->improved_allocation & SDFAT_ALLOC_DELAY)
+ seq_puts(m, ",delay");
+ if (opts->improved_allocation & SDFAT_ALLOC_SMART)
+ seq_printf(m, ",smart,ausize=%u", opts->amap_opt.sect_per_au);
+ if (opts->defrag)
+ seq_puts(m, ",defrag");
+ if (opts->adj_hidsect)
+ seq_puts(m, ",adj_hid");
+ if (opts->adj_req)
+ seq_puts(m, ",adj_req");
+ seq_printf(m, ",symlink=%u", opts->symlink);
+ seq_printf(m, ",bps=%ld", sb->s_blocksize);
+ if (opts->errors == SDFAT_ERRORS_CONT)
+ seq_puts(m, ",errors=continue");
+ else if (opts->errors == SDFAT_ERRORS_PANIC)
+ seq_puts(m, ",errors=panic");
+ else
+ seq_puts(m, ",errors=remount-ro");
+ if (opts->discard)
+ seq_puts(m, ",discard");
+
+ return 0;
+}
+
+/*
+ * Superblock operations table. Version-gated entries reflect VFS API
+ * changes: free_inode replaced destroy_inode in 5.4, and write_super
+ * was removed from the VFS after 3.7.
+ */
+static const struct super_operations sdfat_sops = {
+        .alloc_inode = sdfat_alloc_inode,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+        .free_inode = sdfat_free_inode,
+#else
+        .destroy_inode = sdfat_destroy_inode,
+#endif
+        .write_inode = sdfat_write_inode,
+        .evict_inode = sdfat_evict_inode,
+        .put_super = sdfat_put_super,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+        .write_super = sdfat_write_super,
+#endif
+        .sync_fs = sdfat_sync_fs,
+        .statfs = sdfat_statfs,
+        .remount_fs = sdfat_remount,
+        .show_options = sdfat_show_options,
+};
+
+/*======================================================================*/
+/* SYSFS Operations */
+/*======================================================================*/
+/* Declare a named sysfs attribute bound to sdfat-specific handlers. */
+#define SDFAT_ATTR(name, mode, show, store) \
+static struct sdfat_attr sdfat_attr_##name = __ATTR(name, mode, show, store)
+
+/*
+ * Per-superblock sysfs attribute: wraps a generic struct attribute with
+ * show/store callbacks that receive the owning sdfat_sb_info directly.
+ */
+struct sdfat_attr {
+        struct attribute attr;
+        ssize_t (*show)(struct sdfat_sb_info *, char *);
+        ssize_t (*store)(struct sdfat_sb_info *, const char *, size_t);
+};
+
+/* sysfs ->show dispatcher: recover sbi/attr and call the show handler. */
+static ssize_t sdfat_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+        struct sdfat_attr *sa = container_of(attr, struct sdfat_attr, attr);
+        struct sdfat_sb_info *sbi = container_of(kobj, struct sdfat_sb_info, sb_kobj);
+
+        if (!sa->show)
+                return 0;
+        return sa->show(sbi, buf);
+}
+
+/* sysfs ->store dispatcher: attributes without a store handler accept
+ * (and discard) the whole input by returning len. */
+static ssize_t sdfat_attr_store(struct kobject *kobj, struct attribute *attr,
+                                const char *buf, size_t len)
+{
+        struct sdfat_attr *sa = container_of(attr, struct sdfat_attr, attr);
+        struct sdfat_sb_info *sbi = container_of(kobj, struct sdfat_sb_info, sb_kobj);
+
+        if (!sa->store)
+                return len;
+        return sa->store(sbi, buf, len);
+}
+
+/* sysfs_ops vtable shared by all per-superblock sdfat attributes. */
+static const struct sysfs_ops sdfat_attr_ops = {
+        .show = sdfat_attr_show,
+        .store = sdfat_attr_store,
+};
+
+
+/* /sys/fs/sdfat/<dev>/type: detected volume type as a string. */
+static ssize_t type_show(struct sdfat_sb_info *sbi, char *buf)
+{
+        const char *type_str = sdfat_get_vol_type_str(sbi->fsi.vol_type);
+
+        return snprintf(buf, PAGE_SIZE, "%s\n", type_str);
+}
+SDFAT_ATTR(type, 0444, type_show, NULL);
+
+static ssize_t eio_show(struct sdfat_sb_info *sbi, char *buf)
+{
+ FS_INFO_T *fsi = &(sbi->fsi);
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", fsi->prev_eio);
+}
+SDFAT_ATTR(eio, 0444, eio_show, NULL);
+
+/*
+ * /sys/fs/sdfat/<dev>/fratio: fragmentation ratio in percent, computed
+ * as dirty / (clean + dirty) AUs. A dirty AU is one that is neither
+ * fully clean nor fully used.
+ */
+static ssize_t fratio_show(struct sdfat_sb_info *sbi, char *buf)
+{
+        unsigned int total, clean, full, dirty, ratio;
+
+        total = fsapi_get_au_stat(sbi->host_sb, VOL_AU_STAT_TOTAL);
+        clean = fsapi_get_au_stat(sbi->host_sb, VOL_AU_STAT_CLEAN);
+        full = fsapi_get_au_stat(sbi->host_sb, VOL_AU_STAT_FULL);
+        dirty = total - (full + clean);
+
+        if (!dirty)
+                ratio = 0;
+        else if (!clean)
+                ratio = 100;
+        else
+                ratio = (dirty * 100) / (clean + dirty);
+
+        return snprintf(buf, PAGE_SIZE, "%u\n", ratio);
+}
+SDFAT_ATTR(fratio, 0444, fratio_show, NULL);
+
+/* /sys/fs/sdfat/<dev>/totalau: total allocation-unit count. */
+static ssize_t totalau_show(struct sdfat_sb_info *sbi, char *buf)
+{
+        unsigned int total = fsapi_get_au_stat(sbi->host_sb, VOL_AU_STAT_TOTAL);
+
+        return snprintf(buf, PAGE_SIZE, "%u\n", total);
+}
+SDFAT_ATTR(totalau, 0444, totalau_show, NULL);
+
+/* /sys/fs/sdfat/<dev>/cleanau: clean allocation-unit count. */
+static ssize_t cleanau_show(struct sdfat_sb_info *sbi, char *buf)
+{
+        unsigned int clean = fsapi_get_au_stat(sbi->host_sb, VOL_AU_STAT_CLEAN);
+
+        return snprintf(buf, PAGE_SIZE, "%u\n", clean);
+}
+SDFAT_ATTR(cleanau, 0444, cleanau_show, NULL);
+
+/* /sys/fs/sdfat/<dev>/fullau: fully-used allocation-unit count. */
+static ssize_t fullau_show(struct sdfat_sb_info *sbi, char *buf)
+{
+        unsigned int full = fsapi_get_au_stat(sbi->host_sb, VOL_AU_STAT_FULL);
+
+        return snprintf(buf, PAGE_SIZE, "%u\n", full);
+}
+SDFAT_ATTR(fullau, 0444, fullau_show, NULL);
+
+/* All per-superblock attributes exported under /sys/fs/sdfat/<dev>/. */
+static struct attribute *sdfat_attrs[] = {
+        &sdfat_attr_type.attr,
+        &sdfat_attr_eio.attr,
+        &sdfat_attr_fratio.attr,
+        &sdfat_attr_totalau.attr,
+        &sdfat_attr_cleanau.attr,
+        &sdfat_attr_fullau.attr,
+        NULL,
+};
+
+/* kobject type for the per-superblock sysfs directory. */
+static struct kobj_type sdfat_ktype = {
+        .default_attrs = sdfat_attrs,
+        .sysfs_ops = &sdfat_attr_ops,
+};
+
+/* /sys/fs/sdfat/version: driver version string (read-only). */
+static ssize_t version_show(struct kobject *kobj,
+                struct kobj_attribute *attr, char *buff)
+{
+        return snprintf(buff, PAGE_SIZE, "FS Version %s\n", SDFAT_VERSION);
+}
+
+static struct kobj_attribute version_attr = __ATTR_RO(version);
+
+/* Module-global (not per-superblock) attributes on the sdfat kset. */
+static struct attribute *attributes[] = {
+        &version_attr.attr,
+        NULL,
+};
+
+static struct attribute_group attr_group = {
+        .attrs = attributes,
+};
+
+/*======================================================================*/
+/* Super Block Read Operations */
+/*======================================================================*/
+
+/*
+ * Mount-option token ids. Must stay in sync with the sdfat_tokens
+ * match table below; parse_options() switches on these.
+ */
+enum {
+        Opt_uid,
+        Opt_gid,
+        Opt_umask,
+        Opt_dmask,
+        Opt_fmask,
+        Opt_allow_utime,
+        Opt_codepage,
+        Opt_charset,
+        Opt_utf8,
+        Opt_namecase,
+        Opt_tz_utc,
+        Opt_adj_hidsect,
+        Opt_delay,
+        Opt_smart,
+        Opt_ausize,
+        Opt_packing,
+        Opt_defrag,
+        Opt_symlink,
+        Opt_debug,
+        Opt_err_cont,
+        Opt_err_panic,
+        Opt_err_ro,
+        Opt_err,
+        Opt_discard,
+        Opt_fs,
+        Opt_adj_req,
+};
+
+/* Option-string patterns consumed by match_token() in parse_options(). */
+static const match_table_t sdfat_tokens = {
+        {Opt_uid, "uid=%u"},
+        {Opt_gid, "gid=%u"},
+        {Opt_umask, "umask=%o"},
+        {Opt_dmask, "dmask=%o"},
+        {Opt_fmask, "fmask=%o"},
+        {Opt_allow_utime, "allow_utime=%o"},
+        {Opt_codepage, "codepage=%u"},
+        {Opt_charset, "iocharset=%s"},
+        {Opt_utf8, "utf8"},
+        {Opt_namecase, "namecase=%u"},
+        {Opt_tz_utc, "tz=UTC"},
+        {Opt_adj_hidsect, "adj_hid"},
+        {Opt_delay, "delay"},
+        {Opt_smart, "smart"},
+        {Opt_ausize, "ausize=%u"},
+        {Opt_packing, "packing=%u"},
+        {Opt_defrag, "defrag"},
+        {Opt_symlink, "symlink=%u"},
+        {Opt_debug, "debug"},
+        {Opt_err_cont, "errors=continue"},
+        {Opt_err_panic, "errors=panic"},
+        {Opt_err_ro, "errors=remount-ro"},
+        {Opt_discard, "discard"},
+        {Opt_fs, "fs=%s"},
+        {Opt_adj_req, "adj_req"},
+        {Opt_err, NULL}
+};
+
+/*
+ * parse_options - parse the mount option string into @opts
+ * @sb:      superblock being mounted (for logging and bdev queries)
+ * @options: comma-separated option string from mount(2); may be NULL
+ * @silent:  nonzero suppresses the "unrecognized option" message
+ * @debug:   out-parameter, set to 1 when the "debug" option is given
+ * @opts:    filled with defaults first, then overridden per option
+ *
+ * Returns 0 on success, -ENOMEM or -EINVAL on failure.
+ * NOTE(review): match_int()/match_octal() failures "return 0", i.e.
+ * abort the parse but report success; kept for backward compatibility.
+ */
+static int parse_options(struct super_block *sb, char *options, int silent,
+                         int *debug, struct sdfat_mount_options *opts)
+{
+        char *p;
+        substring_t args[MAX_OPT_ARGS];
+        int option, i;
+        char *tmpstr;
+
+        /* Defaults: caller credentials/umask, configured codepage/charset */
+        opts->fs_uid = current_uid();
+        opts->fs_gid = current_gid();
+        opts->fs_fmask = opts->fs_dmask = current->fs->umask;
+        opts->allow_utime = (unsigned short) -1;
+        opts->codepage = sdfat_default_codepage;
+        opts->iocharset = sdfat_default_iocharset;
+        opts->casesensitive = 0;
+        opts->utf8 = 0;
+        opts->adj_hidsect = 0;
+        opts->tz_utc = 0;
+        opts->improved_allocation = 0;
+        opts->amap_opt.pack_ratio = 0; // Default packing
+        opts->amap_opt.sect_per_au = 0;
+        opts->amap_opt.misaligned_sect = 0;
+        opts->symlink = 0;
+        opts->errors = SDFAT_ERRORS_RO;
+        opts->discard = 0;
+        *debug = 0;
+
+        if (!options)
+                goto out;
+
+        while ((p = strsep(&options, ",")) != NULL) {
+                int token;
+
+                if (!*p)
+                        continue;
+                token = match_token(p, sdfat_tokens, args);
+                switch (token) {
+                case Opt_uid:
+                        if (match_int(&args[0], &option))
+                                return 0;
+                        opts->fs_uid = make_kuid(current_user_ns(), option);
+                        break;
+                case Opt_gid:
+                        if (match_int(&args[0], &option))
+                                return 0;
+                        opts->fs_gid = make_kgid(current_user_ns(), option);
+                        break;
+                case Opt_umask:
+                case Opt_dmask:
+                case Opt_fmask:
+                        /* umask sets both masks; dmask/fmask set one each */
+                        if (match_octal(&args[0], &option))
+                                return 0;
+                        if (token != Opt_dmask)
+                                opts->fs_fmask = option;
+                        if (token != Opt_fmask)
+                                opts->fs_dmask = option;
+                        break;
+                case Opt_allow_utime:
+                        if (match_octal(&args[0], &option))
+                                return 0;
+                        opts->allow_utime = option & (S_IWGRP | S_IWOTH);
+                        break;
+                case Opt_codepage:
+                        if (match_int(&args[0], &option))
+                                return 0;
+                        opts->codepage = option;
+                        break;
+                case Opt_charset:
+                        /* free a previously-parsed duplicate, if any */
+                        if (opts->iocharset != sdfat_default_iocharset)
+                                kfree(opts->iocharset);
+                        tmpstr = match_strdup(&args[0]);
+                        if (!tmpstr)
+                                return -ENOMEM;
+                        opts->iocharset = tmpstr;
+                        break;
+                case Opt_namecase:
+                        if (match_int(&args[0], &option))
+                                return 0;
+                        opts->casesensitive = (option > 0) ? 1:0;
+                        break;
+                case Opt_utf8:
+                        opts->utf8 = 1;
+                        break;
+                case Opt_adj_hidsect:
+                        opts->adj_hidsect = 1;
+                        break;
+                case Opt_tz_utc:
+                        opts->tz_utc = 1;
+                        break;
+                case Opt_symlink:
+                        if (match_int(&args[0], &option))
+                                return 0;
+                        opts->symlink = option > 0 ? 1 : 0;
+                        break;
+                case Opt_delay:
+                        opts->improved_allocation |= SDFAT_ALLOC_DELAY;
+                        break;
+                case Opt_smart:
+                        opts->improved_allocation |= SDFAT_ALLOC_SMART;
+                        break;
+                case Opt_ausize:
+                        /* AU size must be a power of two (sector count) */
+                        if (match_int(&args[0], &option))
+                                return -EINVAL;
+                        if (!is_power_of_2(option))
+                                return -EINVAL;
+                        opts->amap_opt.sect_per_au = option;
+                        IMSG("set AU size by option : %u sectors\n", option);
+                        break;
+                case Opt_packing:
+                        if (match_int(&args[0], &option))
+                                return 0;
+                        opts->amap_opt.pack_ratio = option;
+                        break;
+                case Opt_defrag:
+#ifdef CONFIG_SDFAT_DFR
+                        opts->defrag = 1;
+#else
+                        IMSG("defragmentation config is not enabled. ignore\n");
+#endif
+                        break;
+                case Opt_err_cont:
+                        opts->errors = SDFAT_ERRORS_CONT;
+                        break;
+                case Opt_err_panic:
+                        opts->errors = SDFAT_ERRORS_PANIC;
+                        break;
+                case Opt_err_ro:
+                        opts->errors = SDFAT_ERRORS_RO;
+                        break;
+                case Opt_debug:
+                        *debug = 1;
+                        break;
+                case Opt_discard:
+                        opts->discard = 1;
+                        break;
+                case Opt_fs:
+                        /* fs=auto|exfat|vfat : force the detected fs type */
+                        tmpstr = match_strdup(&args[0]);
+                        if (!tmpstr)
+                                return -ENOMEM;
+                        for (i = 0; i < FS_TYPE_MAX; i++) {
+                                if (!strcmp(tmpstr, FS_TYPE_STR[i])) {
+                                        opts->fs_type = (unsigned char)i;
+                                        sdfat_log_msg(sb, KERN_ERR,
+                                                "set fs-type by option : %s",
+                                                FS_TYPE_STR[i]);
+                                        break;
+                                }
+                        }
+                        kfree(tmpstr);
+                        if (i == FS_TYPE_MAX) {
+                                sdfat_log_msg(sb, KERN_ERR,
+                                        "invalid fs-type, "
+                                        "only allow auto, exfat, vfat");
+                                return -EINVAL;
+                        }
+                        break;
+                case Opt_adj_req:
+#ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
+                        opts->adj_req = 1;
+#else
+                        IMSG("adjust request config is not enabled. ignore\n");
+#endif
+                        break;
+                default:
+                        if (!silent) {
+                                sdfat_msg(sb, KERN_ERR,
+                                        "unrecognized mount option \"%s\" "
+                                        "or missing value", p);
+                        }
+                        return -EINVAL;
+                }
+        }
+
+out:
+        if (opts->allow_utime == (unsigned short) -1)
+                opts->allow_utime = ~opts->fs_dmask & (S_IWGRP | S_IWOTH);
+
+        if (opts->utf8 && strcmp(opts->iocharset, sdfat_iocharset_with_utf8)) {
+                sdfat_msg(sb, KERN_WARNING,
+                        "utf8 enabled, \"iocharset=%s\" is recommended",
+                        sdfat_iocharset_with_utf8);
+        }
+
+        if (opts->discard) {
+                struct request_queue *q = bdev_get_queue(sb->s_bdev);
+
+                /*
+                 * BUGFIX: clear "discard" only when the device cannot do
+                 * it. Previously opts->discard was reset unconditionally
+                 * (the assignment sat outside the if-body), disabling
+                 * discard even on devices that support it.
+                 */
+                if (!blk_queue_discard(q)) {
+                        sdfat_msg(sb, KERN_WARNING,
+                                "mounting with \"discard\" option, but "
+                                "the device does not support discard");
+                        opts->discard = 0;
+                }
+        }
+
+        return 0;
+}
+
+/* Initialize the per-superblock inode hash table and its spinlock. */
+static void sdfat_hash_init(struct super_block *sb)
+{
+        struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+        int bucket;
+
+        spin_lock_init(&sbi->inode_hash_lock);
+        for (bucket = 0; bucket < SDFAT_HASH_SIZE; bucket++)
+                INIT_HLIST_HEAD(&sbi->inode_hashtable[bucket]);
+}
+
+/*
+ * Initialize @inode as the root directory inode during mount.
+ * Fills the private FILE_ID_T with root-dir location/hints, reads the
+ * on-disk entry via fsapi_read_inode(), then sets up VFS fields
+ * (mode, ops, size, timestamps, link count).
+ * Returns 0 on success, -EIO if the root entry cannot be read.
+ */
+static int sdfat_read_root(struct inode *inode)
+{
+        struct super_block *sb = inode->i_sb;
+        struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+        sdfat_timespec_t ts;
+        FS_INFO_T *fsi = &(sbi->fsi);
+        DIR_ENTRY_T info;
+
+        ts = current_time(inode);
+
+        /* Point the fid at the root directory cluster chain */
+        SDFAT_I(inode)->fid.dir.dir = fsi->root_dir;
+        SDFAT_I(inode)->fid.dir.flags = 0x01;
+        SDFAT_I(inode)->fid.entry = -1;
+        SDFAT_I(inode)->fid.start_clu = fsi->root_dir;
+        SDFAT_I(inode)->fid.flags = 0x01;
+        SDFAT_I(inode)->fid.type = TYPE_DIR;
+        SDFAT_I(inode)->fid.version = 0;
+        SDFAT_I(inode)->fid.rwoffset = 0;
+        /* No cached block-map hint yet */
+        SDFAT_I(inode)->fid.hint_bmap.off = CLUS_EOF;
+        SDFAT_I(inode)->fid.hint_stat.eidx = 0;
+        SDFAT_I(inode)->fid.hint_stat.clu = fsi->root_dir;
+        SDFAT_I(inode)->fid.hint_femp.eidx = -1;
+
+        SDFAT_I(inode)->target = NULL;
+
+        if (fsapi_read_inode(inode, &info) < 0)
+                return -EIO;
+
+        inode->i_uid = sbi->options.fs_uid;
+        inode->i_gid = sbi->options.fs_gid;
+        inode_inc_iversion(inode);
+        inode->i_generation = 0;
+        inode->i_mode = sdfat_make_mode(sbi, ATTR_SUBDIR, S_IRWXUGO);
+        inode->i_op = &sdfat_dir_inode_operations;
+        inode->i_fop = &sdfat_dir_operations;
+
+        /* i_blocks counts in 512-byte units, rounded up to cluster size */
+        i_size_write(inode, info.Size);
+        SDFAT_I(inode)->fid.size = info.Size;
+        inode->i_blocks = ((i_size_read(inode) + (fsi->cluster_size - 1))
+                & ~((loff_t)fsi->cluster_size - 1)) >> inode->i_blkbits;
+        SDFAT_I(inode)->i_pos = ((loff_t) fsi->root_dir << 32) | 0xffffffff;
+        SDFAT_I(inode)->i_size_aligned = i_size_read(inode);
+        SDFAT_I(inode)->i_size_ondisk = i_size_read(inode);
+
+        sdfat_save_attr(inode, ATTR_SUBDIR);
+        inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+        /* "." and ".." account for the extra two links */
+        set_nlink(inode, info.NumSubdirs + 2);
+        return 0;
+}
+
+
+
+/* Select dentry operations: case-insensitive ops unless the
+ * "namecase" mount option asked for case-sensitive lookups. */
+static void setup_dops(struct super_block *sb)
+{
+        struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+
+        sb->s_d_op = sbi->options.casesensitive ? &sdfat_dentry_ops
+                                                : &sdfat_ci_dentry_ops;
+}
+
+/*
+ * Fill @sb during mount: allocate sdfat_sb_info, parse options, mount
+ * the fsapi core, load NLS tables, create the root inode/dentry, and
+ * register the per-superblock sysfs kobject.
+ * Returns 0 on success or a negative errno; on failure all partially
+ * acquired resources are released via the failed_mount* labels.
+ */
+static int sdfat_fill_super(struct super_block *sb, void *data, int silent)
+{
+        struct inode *root_inode = NULL;
+        struct sdfat_sb_info *sbi;
+        int debug;
+        int err;
+        char buf[50];
+        struct block_device *bdev = sb->s_bdev;
+        dev_t bd_dev = bdev ? bdev->bd_dev : 0;
+
+        sdfat_log_msg(sb, KERN_INFO, "trying to mount...");
+
+        /*
+         * GFP_KERNEL is ok here, because while we do hold the
+         * supeblock lock, memory pressure can't call back into
+         * the filesystem, since we're only just about to mount
+         * it and have no inodes etc active!
+         */
+        sbi = kzalloc(sizeof(struct sdfat_sb_info), GFP_KERNEL);
+        if (!sbi) {
+                /* kmalloc failed (likely fragmentation); fall back to
+                 * vmalloc and remember which allocator to free with */
+                sdfat_log_msg(sb, KERN_INFO,
+                        "trying to alloc sbi with vzalloc()");
+                sbi = vzalloc(sizeof(struct sdfat_sb_info));
+                if (!sbi) {
+                        sdfat_log_msg(sb, KERN_ERR, "failed to mount! (ENOMEM)");
+                        return -ENOMEM;
+                }
+                sbi->use_vmalloc = 1;
+        }
+
+        mutex_init(&sbi->s_vlock);
+        sb->s_fs_info = sbi;
+        sb->s_flags |= SB_NODIRATIME;
+        sb->s_magic = SDFAT_SUPER_MAGIC;
+        sb->s_op = &sdfat_sops;
+        ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
+                        DEFAULT_RATELIMIT_BURST);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+        sb->s_time_gran = NSEC_PER_SEC; /* the same with default */
+        sb->s_time_min = SDFAT_MIN_TIMESTAMP_SECS;
+        sb->s_time_max = SDFAT_MAX_TIMESTAMP_SECS;
+#endif
+
+        err = parse_options(sb, data, silent, &debug, &sbi->options);
+        if (err) {
+                sdfat_log_msg(sb, KERN_ERR, "failed to parse options");
+                goto failed_mount;
+        }
+
+        setup_sdfat_xattr_handler(sb);
+        setup_sdfat_sync_super_wq(sb);
+        setup_dops(sb);
+
+        err = fsapi_mount(sb);
+        if (err) {
+                sdfat_log_msg(sb, KERN_ERR, "failed to recognize fat type");
+                goto failed_mount;
+        }
+
+        /* set up enough so that it can read an inode */
+        sdfat_hash_init(sb);
+
+        /*
+         * The low byte of FAT's first entry must have same value with
+         * media-field. But in real world, too many devices is
+         * writing wrong value. So, removed that validity check.
+         *
+         * if (FAT_FIRST_ENT(sb, media) != first)
+         */
+
+        /* Load NLS tables: on-disk codepage and io charset */
+        err = -EINVAL;
+        sprintf(buf, "cp%d", sbi->options.codepage);
+        sbi->nls_disk = load_nls(buf);
+        if (!sbi->nls_disk) {
+                sdfat_log_msg(sb, KERN_ERR, "codepage %s not found", buf);
+                goto failed_mount2;
+        }
+
+        sbi->nls_io = load_nls(sbi->options.iocharset);
+        if (!sbi->nls_io) {
+                sdfat_log_msg(sb, KERN_ERR, "IO charset %s not found",
+                        sbi->options.iocharset);
+                goto failed_mount2;
+        }
+
+        err = __alloc_dfr_mem_if_required(sb);
+        if (err) {
+                sdfat_log_msg(sb, KERN_ERR, "failed to initialize a memory for "
+                        "defragmentation");
+                goto failed_mount3;
+        }
+
+        err = -ENOMEM;
+        root_inode = new_inode(sb);
+        if (!root_inode) {
+                sdfat_log_msg(sb, KERN_ERR, "failed to allocate root inode.");
+                goto failed_mount3;
+        }
+
+        root_inode->i_ino = SDFAT_ROOT_INO;
+        inode_set_iversion(root_inode, 1);
+
+        err = sdfat_read_root(root_inode);
+        if (err) {
+                sdfat_log_msg(sb, KERN_ERR, "failed to initialize root inode.");
+                goto failed_mount3;
+        }
+
+        sdfat_attach(root_inode, SDFAT_I(root_inode)->i_pos);
+        insert_inode_hash(root_inode);
+
+        /*
+         * NOTE(review): d_make_root() is documented to iput() the inode
+         * on failure; the failed_mount path below also calls
+         * iput(root_inode) — confirm __d_make_root()'s failure contract
+         * to rule out a double iput.
+         */
+        err = -ENOMEM;
+        sb->s_root = __d_make_root(root_inode);
+        if (!sb->s_root) {
+                sdfat_msg(sb, KERN_ERR, "failed to get the root dentry");
+                goto failed_mount3;
+        }
+
+        /*
+         * Initialize filesystem attributes (for sysfs)
+         * ex: /sys/fs/sdfat/mmcblk1[179:17]
+         */
+        sbi->sb_kobj.kset = sdfat_kset;
+        err = kobject_init_and_add(&sbi->sb_kobj, &sdfat_ktype, NULL,
+                "%s[%d:%d]", sb->s_id, MAJOR(bd_dev), MINOR(bd_dev));
+        if (err) {
+                sdfat_msg(sb, KERN_ERR, "Unable to create sdfat attributes for"
+                        " %s[%d:%d](%d)", sb->s_id,
+                        MAJOR(bd_dev), MINOR(bd_dev), err);
+                goto failed_mount3;
+        }
+
+        sdfat_log_msg(sb, KERN_INFO, "mounted successfully!");
+        /* FOR BIGDATA */
+        sdfat_statistics_set_mnt(&sbi->fsi);
+        sdfat_statistics_set_vol_size(sb);
+        return 0;
+
+failed_mount3:
+        __free_dfr_mem_if_required(sb);
+failed_mount2:
+        fsapi_umount(sb);
+failed_mount:
+        sdfat_log_msg(sb, KERN_INFO, "failed to mount! (%d)", err);
+
+        if (root_inode)
+                iput(root_inode);
+        sb->s_root = NULL;
+
+        if (sbi->nls_io)
+                unload_nls(sbi->nls_io);
+        if (sbi->nls_disk)
+                unload_nls(sbi->nls_disk);
+        if (sbi->options.iocharset != sdfat_default_iocharset)
+                kfree(sbi->options.iocharset);
+        sb->s_fs_info = NULL;
+        /* free with the allocator that was actually used */
+        if (!sbi->use_vmalloc)
+                kfree(sbi);
+        else
+                vfree(sbi);
+        return err;
+}
+
+/* ->mount entry point: standard block-device mount via fill_super. */
+static struct dentry *sdfat_fs_mount(struct file_system_type *fs_type,
+                int flags, const char *dev_name, void *data)
+{
+        return mount_bdev(fs_type, flags, dev_name, data, sdfat_fill_super);
+}
+
+/* Slab constructor: one-time init of a freshly created inode object. */
+static void init_once(void *foo)
+{
+        struct sdfat_inode_info *ei = foo;
+
+        INIT_HLIST_NODE(&ei->i_hash_fat);
+        inode_init_once(&ei->vfs_inode);
+}
+
+/* Create the slab cache backing sdfat_inode_info allocations. */
+static int __init sdfat_init_inodecache(void)
+{
+        sdfat_inode_cachep = kmem_cache_create("sdfat_inode_cache",
+                                sizeof(struct sdfat_inode_info),
+                                0, SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+                                init_once);
+        return sdfat_inode_cachep ? 0 : -ENOMEM;
+}
+
+/* Tear down the inode slab cache (safe if it was never created:
+ * kmem_cache_destroy(NULL) is a no-op). */
+static void sdfat_destroy_inodecache(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+        /*
+         * Make sure all delayed rcu free inodes are flushed before we
+         * destroy cache.
+         */
+        rcu_barrier();
+#endif
+        kmem_cache_destroy(sdfat_inode_cachep);
+}
+
+#ifdef CONFIG_SDFAT_DBG_IOCTL
+/*
+ * Debug variant of ->kill_sb: when the SDFAT_DEBUGFLAGS_INVALID_UMOUNT
+ * flag was set via the debug ioctl, drop all cached data first to
+ * simulate sudden device removal, then do a normal kill_block_super().
+ */
+static void sdfat_debug_kill_sb(struct super_block *sb)
+{
+        struct sdfat_sb_info *sbi = SDFAT_SB(sb);
+        struct block_device *bdev = sb->s_bdev;
+
+        long flags;
+
+        if (sbi) {
+                flags = sbi->debug_flags;
+
+                if (flags & SDFAT_DEBUGFLAGS_INVALID_UMOUNT) {
+                        /* invalidate_bdev drops all device cache include dirty.
+                         * we use this to simulate device removal
+                         */
+                        fsapi_cache_release(sb);
+                        invalidate_bdev(bdev);
+                }
+        }
+
+        kill_block_super(sb);
+}
+#endif /* CONFIG_SDFAT_DBG_IOCTL */
+
+/* Filesystem registration record; kill_sb is swapped for the debug
+ * variant when the debug-ioctl config is enabled. */
+static struct file_system_type sdfat_fs_type = {
+        .owner = THIS_MODULE,
+        .name = "sdfat",
+        .mount = sdfat_fs_mount,
+#ifdef CONFIG_SDFAT_DBG_IOCTL
+        .kill_sb = sdfat_debug_kill_sb,
+#else
+        .kill_sb = kill_block_super,
+#endif /* CONFIG_SDFAT_DBG_IOCTL */
+        .fs_flags = FS_REQUIRES_DEV,
+};
+
+/*
+ * Module init: bring up the fsapi core, the /sys/fs/sdfat kset and its
+ * attributes, the inode cache, and finally register the filesystem.
+ * On any failure, the shared error path unwinds everything that may
+ * have been set up (the helpers tolerate being called for components
+ * that were never initialized).
+ */
+static int __init init_sdfat_fs(void)
+{
+        int err;
+
+        sdfat_log_version();
+        err = fsapi_init();
+        if (err)
+                goto error;
+
+        sdfat_kset = kset_create_and_add("sdfat", NULL, fs_kobj);
+        if (!sdfat_kset) {
+                pr_err("[SDFAT] failed to create sdfat kset\n");
+                err = -ENOMEM;
+                goto error;
+        }
+
+        err = sysfs_create_group(&sdfat_kset->kobj, &attr_group);
+        if (err) {
+                pr_err("[SDFAT] failed to create sdfat version attributes\n");
+                goto error;
+        }
+
+        err = sdfat_statistics_init(sdfat_kset);
+        if (err)
+                goto error;
+
+        err = sdfat_uevent_init(sdfat_kset);
+        if (err)
+                goto error;
+
+        err = sdfat_init_inodecache();
+        if (err) {
+                pr_err("[SDFAT] failed to initialize inode cache\n");
+                goto error;
+        }
+
+        err = register_filesystem(&sdfat_fs_type);
+        if (err) {
+                pr_err("[SDFAT] failed to register filesystem\n");
+                goto error;
+        }
+
+        return 0;
+error:
+        /* Unwind in reverse order of setup; each step is NULL-safe */
+        sdfat_uevent_uninit();
+        sdfat_statistics_uninit();
+
+        if (sdfat_kset) {
+                sysfs_remove_group(&sdfat_kset->kobj, &attr_group);
+                kset_unregister(sdfat_kset);
+                sdfat_kset = NULL;
+        }
+
+        sdfat_destroy_inodecache();
+        fsapi_shutdown();
+
+        pr_err("[SDFAT] failed to initialize FS driver(err:%d)\n", err);
+        return err;
+}
+
+/*
+ * Module exit: tear down sysfs objects, the inode cache, the
+ * filesystem registration, and the fsapi core. Runs only when the
+ * module refcount is zero, i.e. no sdfat mounts remain.
+ */
+static void __exit exit_sdfat_fs(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+        /*
+         * Make sure all delayed rcu free inodes are flushed before we
+         * destroy cache.
+         */
+        rcu_barrier();
+#endif
+        sdfat_uevent_uninit();
+        sdfat_statistics_uninit();
+
+        if (sdfat_kset) {
+                sysfs_remove_group(&sdfat_kset->kobj, &attr_group);
+                kset_unregister(sdfat_kset);
+                sdfat_kset = NULL;
+        }
+
+        sdfat_destroy_inodecache();
+        unregister_filesystem(&sdfat_fs_type);
+
+        fsapi_shutdown();
+}
+
+module_init(init_sdfat_fs);
+module_exit(exit_sdfat_fs);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FAT/exFAT filesystem support");
+MODULE_AUTHOR("Samsung Electronics Co., Ltd.");
+
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDFAT_H
+#define _SDFAT_H
+
+#include <linux/buffer_head.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/nls.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/ratelimit.h>
+#include <linux/version.h>
+#include <linux/kobject.h>
+#include "api.h"
+
+#ifdef CONFIG_SDFAT_DFR
+#include "dfr.h"
+#endif
+
+/*
+ * sdfat error flags
+ */
+#define SDFAT_ERRORS_CONT (1) /* ignore error and continue */
+#define SDFAT_ERRORS_PANIC (2) /* panic on error */
+#define SDFAT_ERRORS_RO (3) /* remount r/o on error */
+
+/*
+ * sdfat allocator flags
+ */
+#define SDFAT_ALLOC_DELAY (1) /* Delayed allocation */
+#define SDFAT_ALLOC_SMART (2) /* Smart allocation */
+
+/*
+ * sdfat allocator destination for smart allocation
+ * NOTE(review): ALLOC_COLD and ALLOC_COLD_ALIGNED share value 1 —
+ * confirm this overlap is intentional (see amap.c users).
+ */
+#define ALLOC_NOWHERE (0)
+#define ALLOC_COLD (1)
+#define ALLOC_HOT (16)
+#define ALLOC_COLD_ALIGNED (1)
+#define ALLOC_COLD_PACKING (2)
+#define ALLOC_COLD_SEQ (4)
+
+/*
+ * sdfat nls lossy flag
+ */
+#define NLS_NAME_NO_LOSSY (0x00) /* no lossy */
+#define NLS_NAME_LOSSY (0x01) /* just detected incorrect filename(s) */
+#define NLS_NAME_OVERLEN (0x02) /* the length is over than its limit */
+
+/*
+ * sdfat common MACRO
+ */
+#define CLUSTER_16(x) ((u16)((x) & 0xFFFFU))
+#define CLUSTER_32(x) ((u32)((x) & 0xFFFFFFFFU))
+#define CLUS_EOF CLUSTER_32(~0) /* end-of-chain marker */
+#define CLUS_BAD (0xFFFFFFF7U)
+#define CLUS_FREE (0)
+#define CLUS_BASE (2) /* first data cluster number in FAT */
+#define IS_CLUS_EOF(x) ((x) == CLUS_EOF)
+#define IS_CLUS_BAD(x) ((x) == CLUS_BAD)
+#define IS_CLUS_FREE(x) ((x) == CLUS_FREE)
+#define IS_LAST_SECT_IN_CLUS(fsi, sec) \
+        ((((sec) - (fsi)->data_start_sector + 1) \
+        & ((1 << (fsi)->sect_per_clus_bits) - 1)) == 0)
+
+/* Convert a cluster number to its first sector, and back. */
+#define CLUS_TO_SECT(fsi, x) \
+        ((((unsigned long long)(x) - CLUS_BASE) << (fsi)->sect_per_clus_bits) + (fsi)->data_start_sector)
+
+#define SECT_TO_CLUS(fsi, sec) \
+        ((u32)((((sec) - (fsi)->data_start_sector) >> (fsi)->sect_per_clus_bits) + CLUS_BASE))
+
+/* variables defined at sdfat.c */
+extern const char *FS_TYPE_STR[];
+
+/* Index values for FS_TYPE_STR and the "fs=" mount option. */
+enum {
+        FS_TYPE_AUTO,
+        FS_TYPE_EXFAT,
+        FS_TYPE_VFAT,
+        FS_TYPE_MAX
+};
+
+/*
+ * sdfat mount in-memory data
+ */
+struct sdfat_mount_options {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+        kuid_t fs_uid;
+        kgid_t fs_gid;
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) */
+        uid_t fs_uid;
+        gid_t fs_gid;
+#endif
+        unsigned short fs_fmask; /* umask applied to regular files */
+        unsigned short fs_dmask; /* umask applied to directories */
+        unsigned short allow_utime; /* permission for setting the [am]time */
+        unsigned short codepage; /* codepage for shortname conversions */
+        char *iocharset; /* charset for filename input/display */
+        struct {
+                unsigned int pack_ratio;
+                unsigned int sect_per_au;
+                unsigned int misaligned_sect;
+        } amap_opt; /* AMAP-related options (see amap.c) */
+
+        unsigned char utf8;
+        unsigned char casesensitive;
+        unsigned char adj_hidsect;
+        unsigned char tz_utc; /* store timestamps in UTC, not local time */
+        unsigned char improved_allocation; /* SDFAT_ALLOC_* bitmask */
+        unsigned char defrag;
+        unsigned char symlink; /* support symlink operation */
+        unsigned char errors; /* on error: continue, panic, remount-ro */
+        unsigned char discard; /* flag on if -o dicard specified and device support discard() */
+        unsigned char fs_type; /* fs_type that user specified */
+        unsigned short adj_req; /* support aligned mpage write */
+};
+
+/* 256-bucket per-superblock inode hash (keyed by on-disk position). */
+#define SDFAT_HASH_BITS 8
+#define SDFAT_HASH_SIZE (1UL << SDFAT_HASH_BITS)
+
+/*
+ * SDFAT file system superblock in-memory data
+ */
+struct sdfat_sb_info {
+        FS_INFO_T fsi; /* private filesystem info */
+
+        struct mutex s_vlock; /* volume lock */
+        int use_vmalloc; /* sbi came from vzalloc, free with vfree */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+        struct rcu_head rcu;
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+        int s_dirt;
+        struct mutex s_lock; /* superblock lock */
+        int write_super_queued; /* Write_super work is pending? */
+        struct delayed_work write_super_work; /* Work_queue data structrue for write_super() */
+        spinlock_t work_lock; /* Lock for WQ */
+#endif
+        struct super_block *host_sb; /* sb pointer */
+        struct sdfat_mount_options options;
+        struct nls_table *nls_disk; /* Codepage used on disk */
+        struct nls_table *nls_io; /* Charset used for input and display */
+        struct ratelimit_state ratelimit; /* throttles sdfat_msg output */
+
+        spinlock_t inode_hash_lock;
+        struct hlist_head inode_hashtable[SDFAT_HASH_SIZE];
+        struct kobject sb_kobj; /* /sys/fs/sdfat/<dev>[maj:min] */
+#ifdef CONFIG_SDFAT_DBG_IOCTL
+        long debug_flags;
+#endif /* CONFIG_SDFAT_DBG_IOCTL */
+
+#ifdef CONFIG_SDFAT_DFR
+        struct defrag_info dfr_info;
+        struct completion dfr_complete;
+        unsigned int *dfr_new_clus;
+        int dfr_new_idx;
+        unsigned int *dfr_page_wb;
+        void **dfr_pagep;
+        unsigned int dfr_hint_clus;
+        unsigned int dfr_hint_idx;
+        int dfr_reserved_clus;
+
+#ifdef CONFIG_SDFAT_DFR_DEBUG
+        int dfr_spo_flag;
+#endif /* CONFIG_SDFAT_DFR_DEBUG */
+
+#endif /* CONFIG_SDFAT_DFR */
+
+#ifdef CONFIG_SDFAT_TRACE_IO
+        /* Statistics for allocator */
+        unsigned int stat_n_pages_written; /* # of written pages in total */
+        unsigned int stat_n_pages_added; /* # of added blocks in total */
+        unsigned int stat_n_bdev_pages_written; /* # of written pages owned by bdev inode */
+        unsigned int stat_n_pages_confused;
+#endif
+        atomic_t stat_n_pages_queued; /* # of pages in the request queue (approx.) */
+};
+
+/*
+ * SDFAT file system inode in-memory data
+ */
+struct sdfat_inode_info {
+        FILE_ID_T fid; /* on-disk location/state of this file */
+        char *target; /* symlink target string, or NULL */
+        /* NOTE: i_size_ondisk is 64bits, so must hold ->inode_lock to access */
+        loff_t i_size_ondisk; /* physically allocated size */
+        loff_t i_size_aligned; /* block-aligned i_size (used in cont_write_begin) */
+        loff_t i_pos; /* on-disk position of directory entry or 0 */
+        struct hlist_node i_hash_fat; /* hash by i_location */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+        struct rw_semaphore truncate_lock; /* protect bmap against truncate */
+#endif
+#ifdef CONFIG_SDFAT_DFR
+        struct defrag_info dfr_info;
+#endif
+        struct inode vfs_inode; /* embedded VFS inode (must stay last-ish for container_of) */
+};
+
+/* timespec64 replaced timespec for inode timestamps in 4.18. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
+typedef struct timespec64 sdfat_timespec_t;
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) */
+typedef struct timespec sdfat_timespec_t;
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) */
+/*
+ * sb->s_flags. Note that these mirror the equivalent MS_* flags where
+ * represented in both.
+ */
+#define SB_RDONLY 1 /* Mount read-only */
+#define SB_NODIRATIME 2048 /* Do not update directory access times */
+/* Backport of the 4.14 sb_rdonly() helper for older kernels. */
+static inline bool sb_rdonly(const struct super_block *sb)
+{
+        return sb->s_flags & MS_RDONLY;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+        /* EMPTY */
+#else
+/* Backport of current_time() (added in 4.9) with 1s granularity. */
+static inline sdfat_timespec_t current_time(struct inode *inode)
+{
+        return CURRENT_TIME_SEC;
+}
+#endif
+/*
+ * FIXME : needs on-disk-slot in-memory data
+ */
+
+/* static inline functons */
+/* Map a volume-type constant to its user-visible name ("fs=" value). */
+static inline const char *sdfat_get_vol_type_str(unsigned int type)
+{
+        switch (type) {
+        case EXFAT:
+                return "exfat";
+        case FAT32:
+                return "vfat:32";
+        case FAT16:
+                return "vfat:16";
+        case FAT12:
+                return "vfat:12";
+        default:
+                return "unknown";
+        }
+}
+
+/* Fetch the sdfat-private superblock info from a VFS superblock. */
+static inline struct sdfat_sb_info *SDFAT_SB(struct super_block *sb)
+{
+        return (struct sdfat_sb_info *)sb->s_fs_info;
+}
+
+/* Fetch the sdfat-private inode info embedding a VFS inode. */
+static inline struct sdfat_inode_info *SDFAT_I(struct inode *inode)
+{
+        return container_of(inode, struct sdfat_inode_info, vfs_inode);
+}
+
+/*
+ * If ->i_mode can't hold S_IWUGO (i.e. ATTR_RO), we use ->i_attrs to
+ * save ATTR_RO instead of ->i_mode.
+ *
+ * Directories never hold ATTR_RO in i_mode; for files it depends on
+ * whether fs_fmask leaves any write bit available.
+ */
+static inline int sdfat_mode_can_hold_ro(struct inode *inode)
+{
+        struct sdfat_sb_info *sbi = SDFAT_SB(inode->i_sb);
+
+        if (S_ISDIR(inode->i_mode))
+                return 0;
+
+        return ((~sbi->options.fs_fmask) & S_IWUGO) ? 1 : 0;
+}
+
+/*
+ * FIXME : needs to check symlink option.
+ *
+ * Convert on-disk attribute bits plus a base mode into a UNIX mode,
+ * applying the configured file/directory umask.
+ */
+static inline mode_t sdfat_make_mode(struct sdfat_sb_info *sbi,
+                u32 attr, mode_t mode)
+{
+        /* read-only files (not dirs) lose all write permission bits */
+        if ((attr & ATTR_READONLY) && !(attr & ATTR_SUBDIR))
+                mode &= ~S_IWUGO;
+
+        if (attr & ATTR_SUBDIR)
+                mode = (mode & ~sbi->options.fs_dmask) | S_IFDIR;
+        else if (attr & ATTR_SYMLINK)
+                mode = (mode & ~sbi->options.fs_dmask) | S_IFLNK;
+        else
+                mode = (mode & ~sbi->options.fs_fmask) | S_IFREG;
+
+        return mode;
+}
+
+/* Return the FAT attribute byte for this inode, deriving SUBDIR and
+ * READONLY bits from the current VFS mode. */
+static inline u32 sdfat_make_attr(struct inode *inode)
+{
+        u32 result = SDFAT_I(inode)->fid.attr;
+
+        if (S_ISDIR(inode->i_mode))
+                result |= ATTR_SUBDIR;
+        if (sdfat_mode_can_hold_ro(inode) && !(inode->i_mode & S_IWUGO))
+                result |= ATTR_READONLY;
+
+        return result;
+}
+
+/* Store @attr in the inode's fid, keeping ATTR_READONLY only when the
+ * mode itself cannot represent read-only. */
+static inline void sdfat_save_attr(struct inode *inode, u32 attr)
+{
+        u32 mask = ATTR_RWMASK;
+
+        if (!sdfat_mode_can_hold_ro(inode))
+                mask |= ATTR_READONLY;
+
+        SDFAT_I(inode)->fid.attr = attr & mask;
+}
+
+/* sdfat/statistics.c */
+/* bigdata function */
+/* Real implementations live in statistics.c when the config is on;
+ * otherwise these collapse to no-op inline stubs. */
+#ifdef CONFIG_SDFAT_STATISTICS
+extern int sdfat_statistics_init(struct kset *sdfat_kset);
+extern void sdfat_statistics_uninit(void);
+extern void sdfat_statistics_set_mnt(FS_INFO_T *fsi);
+extern void sdfat_statistics_set_mnt_ro(void);
+extern void sdfat_statistics_set_mkdir(u8 flags);
+extern void sdfat_statistics_set_create(u8 flags);
+extern void sdfat_statistics_set_rw(u8 flags, u32 clu_offset, s32 create);
+extern void sdfat_statistics_set_trunc(u8 flags, CHAIN_T *clu);
+extern void sdfat_statistics_set_vol_size(struct super_block *sb);
+#else
+static inline int sdfat_statistics_init(struct kset *sdfat_kset)
+{
+        return 0;
+}
+static inline void sdfat_statistics_uninit(void) {};
+static inline void sdfat_statistics_set_mnt(FS_INFO_T *fsi) {};
+static inline void sdfat_statistics_set_mnt_ro(void) {};
+static inline void sdfat_statistics_set_mkdir(u8 flags) {};
+static inline void sdfat_statistics_set_create(u8 flags) {};
+static inline void sdfat_statistics_set_rw(u8 flags, u32 clu_offset, s32 create) {};
+static inline void sdfat_statistics_set_trunc(u8 flags, CHAIN_T *clu) {};
+static inline void sdfat_statistics_set_vol_size(struct super_block *sb) {};
+#endif
+
/* sdfat/nls.c */
/* NLS management function */
/* name comparison helpers (8.3 short names / UTF-16 names) */
s32  nls_cmp_sfn(struct super_block *sb, u8 *a, u8 *b);
s32  nls_cmp_uniname(struct super_block *sb, u16 *a, u16 *b);
/* conversions between UTF-16 names, DOS 8.3 names and VFS byte strings;
 * p_lossy (where present) reports whether the conversion lost information
 */
s32  nls_uni16s_to_sfn(struct super_block *sb, UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname, s32 *p_lossy);
s32  nls_sfn_to_uni16s(struct super_block *sb, DOS_NAME_T *p_dosname, UNI_NAME_T *p_uniname);
s32  nls_uni16s_to_vfsname(struct super_block *sb, UNI_NAME_T *uniname, u8 *p_cstring, s32 len);
s32  nls_vfsname_to_uni16s(struct super_block *sb, const u8 *p_cstring,
			const s32 len, UNI_NAME_T *uniname, s32 *p_lossy);
+
/* sdfat/mpage.c */
#ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
/* writepages implementation provided when aligned mpage write is enabled */
int sdfat_mpage_writepages(struct address_space *mapping,
			struct writeback_control *wbc, get_block_t *get_block);
#endif
+
/* sdfat/xattr.c */
#ifdef CONFIG_SDFAT_VIRTUAL_XATTR
void setup_sdfat_xattr_handler(struct super_block *sb);
extern int sdfat_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
extern ssize_t sdfat_getxattr(struct dentry *dentry, const char *name, void *value, size_t size);
extern ssize_t sdfat_listxattr(struct dentry *dentry, char *list, size_t size);
extern int sdfat_removexattr(struct dentry *dentry, const char *name);
#else
/* No-op stub when virtual xattr support is compiled out.
 * Fix: removed the stray ';' after the body (invalid ISO C at file scope).
 */
static inline void setup_sdfat_xattr_handler(struct super_block *sb) {}
#endif
+
+/* sdfat/misc.c */
+#ifdef CONFIG_SDFAT_UEVENT
+extern int sdfat_uevent_init(struct kset *sdfat_kset);
+extern void sdfat_uevent_uninit(void);
+extern void sdfat_uevent_ro_remount(struct super_block *sb);
+#else
+static inline int sdfat_uevent_init(struct kset *sdfat_kset)
+{
+ return 0;
+}
+static inline void sdfat_uevent_uninit(void) {};
+static inline void sdfat_uevent_ro_remount(struct super_block *sb) {};
+#endif
+extern void
+__sdfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
+ __printf(3, 4) __cold;
+#define sdfat_fs_error(sb, fmt, args...) \
+ __sdfat_fs_error(sb, 1, fmt, ## args)
+#define sdfat_fs_error_ratelimit(sb, fmt, args...) \
+ __sdfat_fs_error(sb, __ratelimit(&SDFAT_SB(sb)->ratelimit), fmt, ## args)
+extern void
+__sdfat_msg(struct super_block *sb, const char *lv, int st, const char *fmt, ...)
+ __printf(4, 5) __cold;
+#define sdfat_msg(sb, lv, fmt, args...) \
+ __sdfat_msg(sb, lv, 0, fmt, ## args)
+#define sdfat_log_msg(sb, lv, fmt, args...) \
+ __sdfat_msg(sb, lv, 1, fmt, ## args)
+extern void sdfat_log_version(void);
+extern void sdfat_time_fat2unix(struct sdfat_sb_info *sbi, sdfat_timespec_t *ts,
+ DATE_TIME_T *tp);
+extern void sdfat_time_unix2fat(struct sdfat_sb_info *sbi, sdfat_timespec_t *ts,
+ DATE_TIME_T *tp);
+extern TIMESTAMP_T *tm_now(struct inode *inode, TIMESTAMP_T *tm);
+static inline TIMESTAMP_T *tm_now_sb(struct super_block *sb, TIMESTAMP_T *tm)
+{
+ struct inode fake_inode;
+
+ fake_inode.i_sb = sb;
+ return tm_now(&fake_inode, tm);
+}
+
#ifdef CONFIG_SDFAT_DEBUG

#ifdef CONFIG_SDFAT_DBG_CAREFUL
void sdfat_debug_check_clusters(struct inode *inode);
#else
#define sdfat_debug_check_clusters(inode)
#endif /* CONFIG_SDFAT_DBG_CAREFUL */

#ifdef CONFIG_SDFAT_DBG_BUGON
#define sdfat_debug_bug_on(expr)	BUG_ON(expr)
#else
#define sdfat_debug_bug_on(expr)
#endif

#ifdef CONFIG_SDFAT_DBG_WARNON
#define sdfat_debug_warn_on(expr)	WARN_ON(expr)
#else
#define sdfat_debug_warn_on(expr)
#endif

#else /* CONFIG_SDFAT_DEBUG */

#define sdfat_debug_check_clusters(inode)
#define sdfat_debug_bug_on(expr)
/* Fix: this stub was missing, so any sdfat_debug_warn_on() user failed
 * to build with CONFIG_SDFAT_DEBUG disabled.
 */
#define sdfat_debug_warn_on(expr)

#endif /* CONFIG_SDFAT_DEBUG */
+
#ifdef CONFIG_SDFAT_TRACE_ELAPSED_TIME
u32 sdfat_time_current_usec(struct timeval *tv);
extern struct timeval __t1;
extern struct timeval __t2;

#define TIME_GET(tv)		sdfat_time_current_usec(tv)
#define TIME_START(s)		sdfat_time_current_usec(s)
#define TIME_END(e)		sdfat_time_current_usec(e)
/* elapsed microseconds between two struct timeval samples */
#define TIME_ELAPSED(s, e)	((u32)(((e)->tv_sec - (s)->tv_sec) * 1000000 + \
				((e)->tv_usec - (s)->tv_usec)))
/* Fix: the old definition printed (__t2 - __t1), subtracting two
 * struct timeval objects directly -- that cannot compile.  Use
 * TIME_ELAPSED and %u to match the u32 result.
 */
#define PRINT_TIME(n)		pr_info("[SDFAT] Elapsed time %d = %u (usec)\n", \
				(n), TIME_ELAPSED(&__t1, &__t2))
#else /* CONFIG_SDFAT_TRACE_ELAPSED_TIME */
#define TIME_GET(tv)		(0)
#define TIME_START(s)
#define TIME_END(e)
#define TIME_ELAPSED(s, e)	(0)
#define PRINT_TIME(n)
#endif /* CONFIG_SDFAT_TRACE_ELAPSED_TIME */
+
/* Verbosity levels passed to __sdfat_dmsg(); larger value = chattier. */
#define SDFAT_MSG_LV_NONE	(0x00000000)
#define SDFAT_MSG_LV_ERR	(0x00000001)
#define SDFAT_MSG_LV_INFO	(0x00000002)
#define SDFAT_MSG_LV_DBG	(0x00000003)
#define SDFAT_MSG_LV_MORE	(0x00000004)
#define SDFAT_MSG_LV_TRACE	(0x00000005)
#define SDFAT_MSG_LV_ALL	(0x00000006)

/* Compile-time cutoff consumed by the #if checks further below. */
#define SDFAT_MSG_LEVEL		SDFAT_MSG_LV_INFO

#define SDFAT_TAG_NAME	"SDFAT"
/* two-level stringize so macro arguments expand before being quoted */
#define __S(x) #x
#define _S(x) __S(x)

extern void __sdfat_dmsg(int level, const char *fmt, ...) __printf(2, 3) __cold;

/* EMSG variant also embeds file name and line number in the tag */
#define SDFAT_EMSG_T(level, ...)	\
	__sdfat_dmsg(level, KERN_ERR "[" SDFAT_TAG_NAME "] [" _S(__FILE__) "(" _S(__LINE__) ")] " __VA_ARGS__)
#define SDFAT_DMSG_T(level, ...)	\
	__sdfat_dmsg(level, KERN_INFO "[" SDFAT_TAG_NAME "] " __VA_ARGS__)

#define SDFAT_EMSG(...)	SDFAT_EMSG_T(SDFAT_MSG_LV_ERR, __VA_ARGS__)
#define SDFAT_IMSG(...)	SDFAT_DMSG_T(SDFAT_MSG_LV_INFO, __VA_ARGS__)
#define SDFAT_DMSG(...)	SDFAT_DMSG_T(SDFAT_MSG_LV_DBG, __VA_ARGS__)
#define SDFAT_MMSG(...)	SDFAT_DMSG_T(SDFAT_MSG_LV_MORE, __VA_ARGS__)
#define SDFAT_TMSG(...)	SDFAT_DMSG_T(SDFAT_MSG_LV_TRACE, __VA_ARGS__)

/* Default to no-ops; redefined below when CONFIG_SDFAT_DBG_MSG is set. */
#define EMSG(...)
#define IMSG(...)
#define DMSG(...)
#define MMSG(...)
#define TMSG(...)

/* *_VAR(exp) compiles "exp" only when the matching level is enabled. */
#define EMSG_VAR(exp)
#define IMSG_VAR(exp)
#define DMSG_VAR(exp)
#define MMSG_VAR(exp)
#define TMSG_VAR(exp)
+
#ifdef CONFIG_SDFAT_DBG_MSG

/* With debug messages compiled in, re-enable each *MSG() family whose
 * level does not exceed the compile-time SDFAT_MSG_LEVEL cutoff.
 */
#if (SDFAT_MSG_LEVEL >= SDFAT_MSG_LV_ERR)
#undef EMSG
#undef EMSG_VAR
#define EMSG(...)	SDFAT_EMSG(__VA_ARGS__)
#define EMSG_VAR(exp)	exp
#endif

#if (SDFAT_MSG_LEVEL >= SDFAT_MSG_LV_INFO)
#undef IMSG
#undef IMSG_VAR
#define IMSG(...)	SDFAT_IMSG(__VA_ARGS__)
#define IMSG_VAR(exp)	exp
#endif

#if (SDFAT_MSG_LEVEL >= SDFAT_MSG_LV_DBG)
#undef DMSG
#undef DMSG_VAR
#define DMSG(...)	SDFAT_DMSG(__VA_ARGS__)
#define DMSG_VAR(exp)	exp
#endif

#if (SDFAT_MSG_LEVEL >= SDFAT_MSG_LV_MORE)
#undef MMSG
#undef MMSG_VAR
#define MMSG(...)	SDFAT_MMSG(__VA_ARGS__)
#define MMSG_VAR(exp)	exp
#endif

/* should replace with trace function */
#if (SDFAT_MSG_LEVEL >= SDFAT_MSG_LV_TRACE)
#undef TMSG
#undef TMSG_VAR
#define TMSG(...)	SDFAT_TMSG(__VA_ARGS__)
#define TMSG_VAR(exp)	exp
#endif

#endif /* CONFIG_SDFAT_DBG_MSG */
+
+
/* Runtime assertion: log the failed expression, then BUG.
 * Fix: wrapped in do { } while (0) so the macro behaves as one
 * statement; the old bare-block form broke constructs such as
 * "if (x) ASSERT(y); else ...".
 */
#define ASSERT(expr)	do {					\
	if (!(expr)) {						\
		pr_err("Assertion failed! %s\n", #expr);	\
		BUG_ON(1);					\
	}							\
} while (0)
+
+#endif /* !_SDFAT_H */
+
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDFAT_FS_H
+#define _SDFAT_FS_H
+
+#include <linux/types.h>
+#include <linux/magic.h>
+#include <asm/byteorder.h>
+
+/*----------------------------------------------------------------------*/
+/* Constant & Macro Definitions */
+/*----------------------------------------------------------------------*/
/* Superblock magics; guarded because the kernel may already define them. */
#ifndef MSDOS_SUPER_MAGIC
#define MSDOS_SUPER_MAGIC	0x4d44		/* MD */
#endif

#ifndef EXFAT_SUPER_MAGIC
#define EXFAT_SUPER_MAGIC	(0x2011BAB0UL)
#endif /* EXFAT_SUPER_MAGIC */

#ifndef SDFAT_SUPER_MAGIC
#define SDFAT_SUPER_MAGIC	(0x5EC5DFA4UL)
#endif /* SDFAT_SUPER_MAGIC */

#define SDFAT_ROOT_INO		1

/* FAT types (values match MBR partition-type IDs) */
#define FAT12			0x01	// FAT12
#define FAT16			0x0E	// Win95 FAT16 (LBA)
#define FAT32			0x0C	// Win95 FAT32 (LBA)
#define EXFAT			0x07	// exFAT
+
+/* directory file name */
+#define DOS_CUR_DIR_NAME ". "
+#define DOS_PAR_DIR_NAME ".. "
+
+#ifdef __LITTLE_ENDIAN
+#define UNI_CUR_DIR_NAME ".\0"
+#define UNI_PAR_DIR_NAME ".\0.\0"
+#else
+#define UNI_CUR_DIR_NAME "\0."
+#define UNI_PAR_DIR_NAME "\0.\0."
+#endif
+
/* file name lengths */
/* NOTE :
 * The maximum length of input or output is limited to 256 including NULL,
 * but 4 extra bytes are allocated so a UTF-8 translation can reside in
 * the last position, because UTF-8 can use up to 6 bytes per character.
 * Therefore, MAX_CHARSET_SIZE supports up to 6 bytes for UTF-8.
 */
#define MAX_UNINAME_BUF_SIZE	(((MAX_NAME_LENGTH+1)*2)+4)
#define MAX_DOSNAME_BUF_SIZE	((DOS_NAME_LENGTH+2)+6)
#define MAX_VFSNAME_BUF_SIZE	((MAX_NAME_LENGTH+1)*MAX_CHARSET_SIZE)
#define MAX_CHARSET_SIZE	6	// max size of multi-byte character
#define MAX_NAME_LENGTH		255	// max len of file name excluding NULL
#define DOS_NAME_LENGTH		11	// DOS file name length excluding NULL

#define SECTOR_SIZE_BITS	9	/* VFS sector size is 512 bytes */

#define DENTRY_SIZE		32	/* directory entry size */
#define DENTRY_SIZE_BITS	5

#define MAX_FAT_DENTRIES	65536	/* FAT allows 65536 directory entries */
#define MAX_EXFAT_DENTRIES	8388608	/* exFAT allows 8388608(256MB) directory entries */
+
+/* PBR entries */
+#define PBR_SIGNATURE 0xAA55
+#define EXT_SIGNATURE 0xAA550000
+#define VOL_LABEL "NO NAME " /* size should be 11 */
+#define OEM_NAME "MSWIN4.1" /* size should be 8 */
+#define STR_FAT12 "FAT12 " /* size should be 8 */
+#define STR_FAT16 "FAT16 " /* size should be 8 */
+#define STR_FAT32 "FAT32 " /* size should be 8 */
+#define STR_EXFAT "EXFAT " /* size should be 8 */
+
/* volume dirty state */
#define VOL_CLEAN	0x0000
#define VOL_DIRTY	0x0002

#define FAT_VOL_DIRTY	0x01

/* max number of clusters */
#define FAT12_THRESHOLD		4087	// 2^12 - 1 + 2 (clu 0 & 1)
#define FAT16_THRESHOLD		65527	// 2^16 - 1 + 2
#define FAT32_THRESHOLD		268435457	// 2^28 - 1 + 2
#define EXFAT_THRESHOLD		268435457	// 2^28 - 1 + 2

/* dentry types */
#define MSDOS_DELETED		0xE5	/* deleted mark */
#define MSDOS_UNUSED		0x00	/* end of directory */

#define EXFAT_UNUSED		0x00	/* end of directory */
#define IS_EXFAT_DELETED(x)	((x) < 0x80) /* deleted file (0x01~0x7F) */
#define EXFAT_INVAL		0x80	/* invalid value */
#define EXFAT_BITMAP		0x81	/* allocation bitmap */
#define EXFAT_UPCASE		0x82	/* upcase table */
#define EXFAT_VOLUME		0x83	/* volume label */
#define EXFAT_FILE		0x85	/* file or dir */
#define EXFAT_STREAM		0xC0	/* stream entry */
#define EXFAT_NAME		0xC1	/* file name entry */
#define EXFAT_ACL		0xC2	/* ACL entry */

/* specific flag */
#define MSDOS_LAST_LFN		0x40

/* file attributes */
#define ATTR_NORMAL		0x0000
#define ATTR_READONLY		0x0001
#define ATTR_HIDDEN		0x0002
#define ATTR_SYSTEM		0x0004
#define ATTR_VOLUME		0x0008
#define ATTR_SUBDIR		0x0010
#define ATTR_ARCHIVE		0x0020
#define ATTR_SYMLINK		0x0040
#define ATTR_EXTEND		(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | \
				 ATTR_VOLUME)	/* 0x000F */

#define ATTR_EXTEND_MASK	(ATTR_EXTEND | ATTR_SUBDIR | ATTR_ARCHIVE)
#define ATTR_RWMASK		(ATTR_HIDDEN | ATTR_SYSTEM | ATTR_VOLUME | \
				 ATTR_SUBDIR | ATTR_ARCHIVE | ATTR_SYMLINK)/* 0x007E */

/* file creation modes */
#define FM_REGULAR	0x00
#define FM_SYMLINK	0x40

/* time modes */
#define TM_CREATE	0
#define TM_MODIFY	1
#define TM_ACCESS	2

/* checksum types */
#define CS_DIR_ENTRY	0
#define CS_PBR_SECTOR	1
#define CS_DEFAULT	2

/* time min/max */
/* Jan 1 GMT 00:00:00 1980 */
#define SDFAT_MIN_TIMESTAMP_SECS	315532800LL
/* Dec 31 GMT 23:59:59 2107 */
#define SDFAT_MAX_TIMESTAMP_SECS	4354819199LL
+
+
+/*
+ * ioctl command
+ */
+#define SDFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32)
+#define SDFAT_IOCTL_DFR_INFO _IOC(_IOC_NONE, 'E', 0x13, sizeof(u32))
+#define SDFAT_IOCTL_DFR_TRAV _IOC(_IOC_NONE, 'E', 0x14, sizeof(u32))
+#define SDFAT_IOCTL_DFR_REQ _IOC(_IOC_NONE, 'E', 0x15, sizeof(u32))
+#define SDFAT_IOCTL_DFR_SPO_FLAG _IOC(_IOC_NONE, 'E', 0x16, sizeof(u32))
+#define SDFAT_IOCTL_PANIC _IOC(_IOC_NONE, 'E', 0x17, sizeof(u32))
+
+/*
+ * ioctl command for debugging
+ */
+
+/*
+ * IOCTL code 'f' used by
+ * - file systems typically #0~0x1F
+ * - embedded terminal devices #128~
+ * - exts for debugging purpose #99
+ * number 100 and 101 is available now but has possible conflicts
+ *
+ * NOTE : This is available only If CONFIG_SDFAT_DVBG_IOCTL is enabled.
+ *
+ */
+#define SDFAT_IOC_GET_DEBUGFLAGS _IOR('f', 100, long)
+#define SDFAT_IOC_SET_DEBUGFLAGS _IOW('f', 101, long)
+
+#define SDFAT_DEBUGFLAGS_INVALID_UMOUNT 0x01
+#define SDFAT_DEBUGFLAGS_ERROR_RW 0x02
+
+/*----------------------------------------------------------------------*/
+/* On-Disk Type Definitions */
+/*----------------------------------------------------------------------*/
+
/* FAT12/16/32 BIOS parameter block (64 bytes) -- on-disk layout, do not
 * reorder or resize fields.
 */
typedef struct {
	__u8	jmp_boot[3];
	__u8	oem_name[8];

	__u8	sect_size[2];		/* unaligned */
	__u8	sect_per_clus;
	__le16	num_reserved;		/* . */
	__u8	num_fats;
	__u8	num_root_entries[2];	/* unaligned */
	__u8	num_sectors[2];		/* unaligned */
	__u8	media_type;
	__le16	num_fat_sectors;
	__le16	sectors_in_track;
	__le16	num_heads;
	__le32	num_hid_sectors;	/* . */
	__le32	num_huge_sectors;

	/* FAT16 and FAT32 interpret the trailing bytes differently */
	union {
		struct {
			__u8	phy_drv_no;
			__u8	state;		/* used by WinNT for mount state */
			__u8	ext_signature;
			__u8	vol_serial[4];
			__u8	vol_label[11];
			__u8	vol_type[8];
			__le16	nouse;
		} f16;

		struct {
			__le32	num_fat32_sectors;
			__le16	ext_flags;
			__u8	fs_version[2];
			__le32	root_cluster;	/* . */
			__le16	fsinfo_sector;
			__le16	backup_sector;
			__le16	reserved[6];	/* . */
		} f32;
	};
} bpb_t;
+
/* FAT32 EXTEND BIOS parameter block (32 bytes) -- on-disk layout */
typedef struct {
	__u8	phy_drv_no;
	__u8	state;		/* used by WindowsNT for mount state */
	__u8	ext_signature;
	__u8	vol_serial[4];
	__u8	vol_label[11];
	__u8	vol_type[8];
	__le16	dummy[3];
} bsx32_t;

/* EXFAT BIOS parameter block (64 bytes) -- on-disk layout */
typedef struct {
	__u8	jmp_boot[3];
	__u8	oem_name[8];
	__u8	res_zero[53];	/* must be zero on a valid exFAT PBR */
} bpb64_t;

/* EXFAT EXTEND BIOS parameter block (56 bytes) -- on-disk layout */
typedef struct {
	__le64	vol_offset;
	__le64	vol_length;
	__le32	fat_offset;
	__le32	fat_length;
	__le32	clu_offset;
	__le32	clu_count;
	__le32	root_cluster;
	__le32	vol_serial;
	__u8	fs_version[2];
	__le16	vol_flags;
	__u8	sect_size_bits;
	__u8	sect_per_clus_bits;
	__u8	num_fats;
	__u8	phy_drv_no;
	__u8	perc_in_use;
	__u8	reserved2[7];
} bsx64_t;

/* FAT32 PBR (64 bytes) */
typedef struct {
	bpb_t bpb;
} pbr16_t;

/* FAT32 PBR[BPB+BSX] (96 bytes) */
typedef struct {
	bpb_t bpb;
	bsx32_t bsx;
} pbr32_t;

/* EXFAT PBR[BPB+BSX] (120 bytes) */
typedef struct {
	bpb64_t bpb;
	bsx64_t bsx;
} pbr64_t;
+
/* Common PBR[Partition Boot Record] (512 bytes): raw views overlaid with
 * the FAT and exFAT interpretations of the BPB/BSX areas.
 */
typedef struct {
	union {
		__u8	raw[64];
		bpb_t	fat;
		bpb64_t	f64;
	} bpb;
	union {
		__u8	raw[56];
		bsx32_t	f32;
		bsx64_t	f64;
	} bsx;
	__u8	boot_code[390];
	__le16	signature;	/* PBR_SIGNATURE (0xAA55) when valid */
} pbr_t;

/* FAT32 filesystem information sector (512 bytes) -- on-disk layout */
typedef struct {
	__le32	signature1;	// aligned
	__u8	reserved1[480];
	__le32	signature2;	// aligned
	__le32	free_cluster;	// aligned
	__le32	next_cluster;	// aligned
	__u8	reserved2[14];
	__le16	signature3[2];
} fat32_fsi_t;
+
/* FAT directory entry (32 bytes): opaque view used before the entry
 * type is known.
 */
typedef struct {
	__u8	dummy[32];
} DENTRY_T;

/* FAT 8.3 (short name) directory entry -- on-disk layout */
typedef struct {
	__u8	name[DOS_NAME_LENGTH];	/* 11 chars */
	__u8	attr;
	__u8	lcase;
	__u8	create_time_ms;
	__le16	create_time;	// aligned
	__le16	create_date;	// aligned
	__le16	access_date;	// aligned
	__le16	start_clu_hi;	// aligned
	__le16	modify_time;	// aligned
	__le16	modify_date;	// aligned
	__le16	start_clu_lo;	// aligned
	__le32	size;		// aligned
} DOS_DENTRY_T;

/* FAT extended (long file name) directory entry (32 bytes) */
typedef struct {
	__u8	order;
	__u8	unicode_0_4[10];
	__u8	attr;
	__u8	sysid;
	__u8	checksum;
	__le16	unicode_5_10[6];	// aligned
	__le16	start_clu;		// aligned
	__le16	unicode_11_12[2];	// aligned
} EXT_DENTRY_T;
+
/* EXFAT file directory entry (32 bytes) -- on-disk layout */
typedef struct {
	__u8	type;
	__u8	num_ext;	/* number of secondary entries that follow */
	__le16	checksum;	// aligned
	__le16	attr;		// aligned
	__le16	reserved1;
	__le16	create_time;	// aligned
	__le16	create_date;	// aligned
	__le16	modify_time;	// aligned
	__le16	modify_date;	// aligned
	__le16	access_time;	// aligned
	__le16	access_date;	// aligned
	__u8	create_time_ms;
	__u8	modify_time_ms;
	__u8	create_tz;
	__u8	modify_tz;
	__u8	access_tz;
	__u8	reserved2[7];
} FILE_DENTRY_T;

/* EXFAT stream extension directory entry (32 bytes) */
typedef struct {
	__u8	type;
	__u8	flags;		/* 0x03 = contiguous, no FAT chain */
	__u8	reserved1;
	__u8	name_len;
	__le16	name_hash;	// aligned
	__le16	reserved2;
	__le64	valid_size;	// aligned
	__le32	reserved3;	// aligned
	__le32	start_clu;	// aligned
	__le64	size;		// aligned
} STRM_DENTRY_T;

/* EXFAT file name directory entry (32 bytes) */
typedef struct {
	__u8	type;
	__u8	flags;
	__le16	unicode_0_14[15];	// aligned
} NAME_DENTRY_T;

/* EXFAT allocation bitmap directory entry (32 bytes) */
typedef struct {
	__u8	type;
	__u8	flags;
	__u8	reserved[18];
	__le32	start_clu;	// aligned
	__le64	size;		// aligned
} BMAP_DENTRY_T;

/* EXFAT up-case table directory entry (32 bytes) */
typedef struct {
	__u8	type;
	__u8	reserved1[3];
	__le32	checksum;	// aligned
	__u8	reserved2[12];
	__le32	start_clu;	// aligned
	__le64	size;		// aligned
} CASE_DENTRY_T;

/* EXFAT volume label directory entry (32 bytes) */
typedef struct {
	__u8	type;
	__u8	label_len;
	__le16	unicode_0_10[11];	// aligned
	__u8	reserved[8];
} VOLM_DENTRY_T;
+
+#endif /* _SDFAT_FS_H */
--- /dev/null
+#include "sdfat.h"
+
/* Cluster-size histogram widths, indexed by sect_per_clus_bits. */
#define SDFAT_VF_CLUS_MAX	7	/* 512 Byte ~ 32 KByte */
#define SDFAT_EF_CLUS_MAX	17	/* 512 Byte ~ 32 MByte */

/* indices into statistics.mnt_cnt[] */
enum {
	SDFAT_MNT_FAT12,
	SDFAT_MNT_FAT16,
	SDFAT_MNT_FAT32,
	SDFAT_MNT_EXFAT,
	SDFAT_MNT_RO,
	SDFAT_MNT_MAX
};

/* indices into statistics.nofat_op[] (no-FAT-chain operations) */
enum {
	SDFAT_OP_EXFAT_MNT,
	SDFAT_OP_MKDIR,
	SDFAT_OP_CREATE,
	SDFAT_OP_READ,
	SDFAT_OP_WRITE,
	SDFAT_OP_TRUNC,
	SDFAT_OP_MAX
};

/* indices into statistics.vol_size[] (power-of-two size buckets) */
enum {
	SDFAT_VOL_4G,
	SDFAT_VOL_8G,
	SDFAT_VOL_16G,
	SDFAT_VOL_32G,
	SDFAT_VOL_64G,
	SDFAT_VOL_128G,
	SDFAT_VOL_256G,
	SDFAT_VOL_512G,
	SDFAT_VOL_XTB,
	SDFAT_VOL_MAX
};

/* Global counters exported through the sysfs attributes below. */
static struct sdfat_statistics {
	u32 clus_vfat[SDFAT_VF_CLUS_MAX];
	u32 clus_exfat[SDFAT_EF_CLUS_MAX];
	u32 mnt_cnt[SDFAT_MNT_MAX];
	u32 nofat_op[SDFAT_OP_MAX];
	u32 vol_size[SDFAT_VOL_MAX];
} statistics;

/* "statistics" kset created by sdfat_statistics_init() */
static struct kset *sdfat_statistics_kset;
+
/* sysfs show: vfat mount counts per cluster size, as key:value pairs */
static ssize_t vfat_cl_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buff)
{
	return snprintf(buff, PAGE_SIZE, "\"VCL_512B_I\":\"%u\","
		"\"VCL_1K_I\":\"%u\",\"VCL_2K_I\":\"%u\","
		"\"VCL_4K_I\":\"%u\",\"VCL_8K_I\":\"%u\","
		"\"VCL_16K_I\":\"%u\",\"VCL_32K_I\":\"%u\"\n",
		statistics.clus_vfat[0], statistics.clus_vfat[1],
		statistics.clus_vfat[2], statistics.clus_vfat[3],
		statistics.clus_vfat[4], statistics.clus_vfat[5],
		statistics.clus_vfat[6]);
}
+
/* sysfs show: exfat mount counts per cluster size, as key:value pairs */
static ssize_t exfat_cl_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buff)
{
	return snprintf(buff, PAGE_SIZE, "\"ECL_512B_I\":\"%u\","
		"\"ECL_1K_I\":\"%u\",\"ECL_2K_I\":\"%u\","
		"\"ECL_4K_I\":\"%u\",\"ECL_8K_I\":\"%u\","
		"\"ECL_16K_I\":\"%u\",\"ECL_32K_I\":\"%u\","
		"\"ECL_64K_I\":\"%u\",\"ECL_128K_I\":\"%u\","
		"\"ECL_256K_I\":\"%u\",\"ECL_512K_I\":\"%u\","
		"\"ECL_1M_I\":\"%u\",\"ECL_2M_I\":\"%u\","
		"\"ECL_4M_I\":\"%u\",\"ECL_8M_I\":\"%u\","
		"\"ECL_16M_I\":\"%u\",\"ECL_32M_I\":\"%u\"\n",
		statistics.clus_exfat[0], statistics.clus_exfat[1],
		statistics.clus_exfat[2], statistics.clus_exfat[3],
		statistics.clus_exfat[4], statistics.clus_exfat[5],
		statistics.clus_exfat[6], statistics.clus_exfat[7],
		statistics.clus_exfat[8], statistics.clus_exfat[9],
		statistics.clus_exfat[10], statistics.clus_exfat[11],
		statistics.clus_exfat[12], statistics.clus_exfat[13],
		statistics.clus_exfat[14], statistics.clus_exfat[15],
		statistics.clus_exfat[16]);
}
+
/* sysfs show: mount counts per volume type plus read-only mounts */
static ssize_t mount_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buff)
{
	return snprintf(buff, PAGE_SIZE, "\"FAT12_MNT_I\":\"%u\","
		"\"FAT16_MNT_I\":\"%u\",\"FAT32_MNT_I\":\"%u\","
		"\"EXFAT_MNT_I\":\"%u\",\"RO_MNT_I\":\"%u\"\n",
		statistics.mnt_cnt[SDFAT_MNT_FAT12],
		statistics.mnt_cnt[SDFAT_MNT_FAT16],
		statistics.mnt_cnt[SDFAT_MNT_FAT32],
		statistics.mnt_cnt[SDFAT_MNT_EXFAT],
		statistics.mnt_cnt[SDFAT_MNT_RO]);
}
+
/* sysfs show: which no-FAT-chain operations have occurred (0/1 flags) */
static ssize_t nofat_op_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buff)
{
	return snprintf(buff, PAGE_SIZE, "\"NOFAT_MOUNT_I\":\"%u\","
		"\"NOFAT_MKDIR_I\":\"%u\",\"NOFAT_CREATE_I\":\"%u\","
		"\"NOFAT_READ_I\":\"%u\",\"NOFAT_WRITE_I\":\"%u\","
		"\"NOFAT_TRUNC_I\":\"%u\"\n",
		statistics.nofat_op[SDFAT_OP_EXFAT_MNT],
		statistics.nofat_op[SDFAT_OP_MKDIR],
		statistics.nofat_op[SDFAT_OP_CREATE],
		statistics.nofat_op[SDFAT_OP_READ],
		statistics.nofat_op[SDFAT_OP_WRITE],
		statistics.nofat_op[SDFAT_OP_TRUNC]);
}
+
/* sysfs show: mount counts per volume-size bucket */
static ssize_t vol_size_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buff)
{
	return snprintf(buff, PAGE_SIZE, "\"VOL_4G_I\":\"%u\","
		"\"VOL_8G_I\":\"%u\",\"VOL_16G_I\":\"%u\","
		"\"VOL_32G_I\":\"%u\",\"VOL_64G_I\":\"%u\","
		"\"VOL_128G_I\":\"%u\",\"VOL_256G_I\":\"%u\","
		"\"VOL_512G_I\":\"%u\",\"VOL_XTB_I\":\"%u\"\n",
		statistics.vol_size[SDFAT_VOL_4G],
		statistics.vol_size[SDFAT_VOL_8G],
		statistics.vol_size[SDFAT_VOL_16G],
		statistics.vol_size[SDFAT_VOL_32G],
		statistics.vol_size[SDFAT_VOL_64G],
		statistics.vol_size[SDFAT_VOL_128G],
		statistics.vol_size[SDFAT_VOL_256G],
		statistics.vol_size[SDFAT_VOL_512G],
		statistics.vol_size[SDFAT_VOL_XTB]);
}
+
/* read-only sysfs attributes registered on the "statistics" kset */
static struct kobj_attribute vfat_cl_attr = __ATTR_RO(vfat_cl);
static struct kobj_attribute exfat_cl_attr = __ATTR_RO(exfat_cl);
static struct kobj_attribute mount_attr = __ATTR_RO(mount);
static struct kobj_attribute nofat_op_attr = __ATTR_RO(nofat_op);
static struct kobj_attribute vol_size_attr = __ATTR_RO(vol_size);

static struct attribute *attributes_statistics[] = {
	&vfat_cl_attr.attr,
	&exfat_cl_attr.attr,
	&mount_attr.attr,
	&nofat_op_attr.attr,
	&vol_size_attr.attr,
	NULL,	/* sentinel required by sysfs_create_group() */
};

static struct attribute_group attr_group_statistics = {
	.attrs = attributes_statistics,
};
+
+int sdfat_statistics_init(struct kset *sdfat_kset)
+{
+ int err;
+
+ sdfat_statistics_kset = kset_create_and_add("statistics", NULL, &sdfat_kset->kobj);
+ if (!sdfat_statistics_kset) {
+ pr_err("[SDFAT] failed to create sdfat statistics kobj\n");
+ return -ENOMEM;
+ }
+
+ err = sysfs_create_group(&sdfat_statistics_kset->kobj, &attr_group_statistics);
+ if (err) {
+ pr_err("[SDFAT] failed to create sdfat statistics attributes\n");
+ kset_unregister(sdfat_statistics_kset);
+ sdfat_statistics_kset = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+void sdfat_statistics_uninit(void)
+{
+ if (sdfat_statistics_kset) {
+ sysfs_remove_group(&sdfat_statistics_kset->kobj, &attr_group_statistics);
+ kset_unregister(sdfat_statistics_kset);
+ sdfat_statistics_kset = NULL;
+ }
+ memset(&statistics, 0, sizeof(struct sdfat_statistics));
+}
+
+void sdfat_statistics_set_mnt(FS_INFO_T *fsi)
+{
+ if (fsi->vol_type == EXFAT) {
+ statistics.mnt_cnt[SDFAT_MNT_EXFAT]++;
+ statistics.nofat_op[SDFAT_OP_EXFAT_MNT] = 1;
+ if (fsi->sect_per_clus_bits < SDFAT_EF_CLUS_MAX)
+ statistics.clus_exfat[fsi->sect_per_clus_bits]++;
+ else
+ statistics.clus_exfat[SDFAT_EF_CLUS_MAX - 1]++;
+ return;
+ }
+
+ if (fsi->vol_type == FAT32)
+ statistics.mnt_cnt[SDFAT_MNT_FAT32]++;
+ else if (fsi->vol_type == FAT16)
+ statistics.mnt_cnt[SDFAT_MNT_FAT16]++;
+ else if (fsi->vol_type == FAT12)
+ statistics.mnt_cnt[SDFAT_MNT_FAT12]++;
+
+ if (fsi->sect_per_clus_bits < SDFAT_VF_CLUS_MAX)
+ statistics.clus_vfat[fsi->sect_per_clus_bits]++;
+ else
+ statistics.clus_vfat[SDFAT_VF_CLUS_MAX - 1]++;
+}
+
/* Count a read-only mount. */
void sdfat_statistics_set_mnt_ro(void)
{
	statistics.mnt_cnt[SDFAT_MNT_RO]++;
}
+
+void sdfat_statistics_set_mkdir(u8 flags)
+{
+ if (flags != 0x03)
+ return;
+ statistics.nofat_op[SDFAT_OP_MKDIR] = 1;
+}
+
+void sdfat_statistics_set_create(u8 flags)
+{
+ if (flags != 0x03)
+ return;
+ statistics.nofat_op[SDFAT_OP_CREATE] = 1;
+}
+
/* flags : file or dir flags, 0x03 means no FAT chain.
 * clu_offset : file or dir logical cluster offset
 * create : BMAP_ADD_CLUSTER or not
 *
 * An access with BMAP_ADD_CLUSTER counts as a no-FAT-chain write when
 * the file or dir has the 0x03 flag and two or more clusters; without
 * BMAP_ADD_CLUSTER the same condition counts as a no-FAT-chain read.
 */
+void sdfat_statistics_set_rw(u8 flags, u32 clu_offset, s32 create)
+{
+ if ((flags == 0x03) && (clu_offset > 1)) {
+ if (create)
+ statistics.nofat_op[SDFAT_OP_WRITE] = 1;
+ else
+ statistics.nofat_op[SDFAT_OP_READ] = 1;
+ }
+}
+
/* flags : file or dir flags, 0x03 means no FAT chain.
 * clu : cluster chain
 *
 * Mark a no-FAT-chain truncation when the file or dir has the 0x03
 * flag and two or more clusters.
 */
+void sdfat_statistics_set_trunc(u8 flags, CHAIN_T *clu)
+{
+ if ((flags == 0x03) && (clu->size > 1))
+ statistics.nofat_op[SDFAT_OP_TRUNC] = 1;
+}
+
+void sdfat_statistics_set_vol_size(struct super_block *sb)
+{
+ u64 vol_size;
+ FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
+
+ vol_size = (u64)fsi->num_sectors << sb->s_blocksize_bits;
+
+ if (vol_size <= ((u64)1 << 32))
+ statistics.vol_size[SDFAT_VOL_4G]++;
+ else if (vol_size <= ((u64)1 << 33))
+ statistics.vol_size[SDFAT_VOL_8G]++;
+ else if (vol_size <= ((u64)1 << 34))
+ statistics.vol_size[SDFAT_VOL_16G]++;
+ else if (vol_size <= ((u64)1 << 35))
+ statistics.vol_size[SDFAT_VOL_32G]++;
+ else if (vol_size <= ((u64)1 << 36))
+ statistics.vol_size[SDFAT_VOL_64G]++;
+ else if (vol_size <= ((u64)1 << 37))
+ statistics.vol_size[SDFAT_VOL_128G]++;
+ else if (vol_size <= ((u64)1 << 38))
+ statistics.vol_size[SDFAT_VOL_256G]++;
+ else if (vol_size <= ((u64)1 << 39))
+ statistics.vol_size[SDFAT_VOL_512G]++;
+ else
+ statistics.vol_size[SDFAT_VOL_XTB]++;
+}
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UPCASE_H
+#define _UPCASE_H
+
/* Upcase table macros: a 16-bit character index is split into a high
 * (column) part and a low (row) part to address the 2-D default table.
 */
#define SDFAT_NUM_UPCASE	2918
#define HIGH_INDEX_BIT	(8)
#define HIGH_INDEX_MASK	(0xFF00)
#define LOW_INDEX_BIT	(16-HIGH_INDEX_BIT)
#define UTBL_ROW_COUNT	(1<<LOW_INDEX_BIT)
#define UTBL_COL_COUNT	(1<<HIGH_INDEX_BIT)
+
+static inline u16 get_col_index(u16 i)
+{
+ return i >> LOW_INDEX_BIT;
+}
+static inline u16 get_row_index(u16 i)
+{
+ return i & ~HIGH_INDEX_MASK;
+}
+
+
+static const u8 uni_def_upcase[SDFAT_NUM_UPCASE<<1] = {
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x04, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00,
+ 0x08, 0x00, 0x09, 0x00, 0x0A, 0x00, 0x0B, 0x00, 0x0C, 0x00, 0x0D, 0x00, 0x0E, 0x00, 0x0F, 0x00,
+ 0x10, 0x00, 0x11, 0x00, 0x12, 0x00, 0x13, 0x00, 0x14, 0x00, 0x15, 0x00, 0x16, 0x00, 0x17, 0x00,
+ 0x18, 0x00, 0x19, 0x00, 0x1A, 0x00, 0x1B, 0x00, 0x1C, 0x00, 0x1D, 0x00, 0x1E, 0x00, 0x1F, 0x00,
+ 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, 0x23, 0x00, 0x24, 0x00, 0x25, 0x00, 0x26, 0x00, 0x27, 0x00,
+ 0x28, 0x00, 0x29, 0x00, 0x2A, 0x00, 0x2B, 0x00, 0x2C, 0x00, 0x2D, 0x00, 0x2E, 0x00, 0x2F, 0x00,
+ 0x30, 0x00, 0x31, 0x00, 0x32, 0x00, 0x33, 0x00, 0x34, 0x00, 0x35, 0x00, 0x36, 0x00, 0x37, 0x00,
+ 0x38, 0x00, 0x39, 0x00, 0x3A, 0x00, 0x3B, 0x00, 0x3C, 0x00, 0x3D, 0x00, 0x3E, 0x00, 0x3F, 0x00,
+ 0x40, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00, 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00,
+ 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00, 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00,
+ 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00, 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00,
+ 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x5B, 0x00, 0x5C, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x5F, 0x00,
+ 0x60, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00, 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00,
+ 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00, 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00,
+ 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00, 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00,
+ 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x7B, 0x00, 0x7C, 0x00, 0x7D, 0x00, 0x7E, 0x00, 0x7F, 0x00,
+ 0x80, 0x00, 0x81, 0x00, 0x82, 0x00, 0x83, 0x00, 0x84, 0x00, 0x85, 0x00, 0x86, 0x00, 0x87, 0x00,
+ 0x88, 0x00, 0x89, 0x00, 0x8A, 0x00, 0x8B, 0x00, 0x8C, 0x00, 0x8D, 0x00, 0x8E, 0x00, 0x8F, 0x00,
+ 0x90, 0x00, 0x91, 0x00, 0x92, 0x00, 0x93, 0x00, 0x94, 0x00, 0x95, 0x00, 0x96, 0x00, 0x97, 0x00,
+ 0x98, 0x00, 0x99, 0x00, 0x9A, 0x00, 0x9B, 0x00, 0x9C, 0x00, 0x9D, 0x00, 0x9E, 0x00, 0x9F, 0x00,
+ 0xA0, 0x00, 0xA1, 0x00, 0xA2, 0x00, 0xA3, 0x00, 0xA4, 0x00, 0xA5, 0x00, 0xA6, 0x00, 0xA7, 0x00,
+ 0xA8, 0x00, 0xA9, 0x00, 0xAA, 0x00, 0xAB, 0x00, 0xAC, 0x00, 0xAD, 0x00, 0xAE, 0x00, 0xAF, 0x00,
+ 0xB0, 0x00, 0xB1, 0x00, 0xB2, 0x00, 0xB3, 0x00, 0xB4, 0x00, 0xB5, 0x00, 0xB6, 0x00, 0xB7, 0x00,
+ 0xB8, 0x00, 0xB9, 0x00, 0xBA, 0x00, 0xBB, 0x00, 0xBC, 0x00, 0xBD, 0x00, 0xBE, 0x00, 0xBF, 0x00,
+ 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00, 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00,
+ 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00, 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00,
+ 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00, 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xD7, 0x00,
+ 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00, 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0xDF, 0x00,
+ 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00, 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00,
+ 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00, 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00,
+ 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00, 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xF7, 0x00,
+ 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00, 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0x78, 0x01,
+ 0x00, 0x01, 0x00, 0x01, 0x02, 0x01, 0x02, 0x01, 0x04, 0x01, 0x04, 0x01, 0x06, 0x01, 0x06, 0x01,
+ 0x08, 0x01, 0x08, 0x01, 0x0A, 0x01, 0x0A, 0x01, 0x0C, 0x01, 0x0C, 0x01, 0x0E, 0x01, 0x0E, 0x01,
+ 0x10, 0x01, 0x10, 0x01, 0x12, 0x01, 0x12, 0x01, 0x14, 0x01, 0x14, 0x01, 0x16, 0x01, 0x16, 0x01,
+ 0x18, 0x01, 0x18, 0x01, 0x1A, 0x01, 0x1A, 0x01, 0x1C, 0x01, 0x1C, 0x01, 0x1E, 0x01, 0x1E, 0x01,
+ 0x20, 0x01, 0x20, 0x01, 0x22, 0x01, 0x22, 0x01, 0x24, 0x01, 0x24, 0x01, 0x26, 0x01, 0x26, 0x01,
+ 0x28, 0x01, 0x28, 0x01, 0x2A, 0x01, 0x2A, 0x01, 0x2C, 0x01, 0x2C, 0x01, 0x2E, 0x01, 0x2E, 0x01,
+ 0x30, 0x01, 0x31, 0x01, 0x32, 0x01, 0x32, 0x01, 0x34, 0x01, 0x34, 0x01, 0x36, 0x01, 0x36, 0x01,
+ 0x38, 0x01, 0x39, 0x01, 0x39, 0x01, 0x3B, 0x01, 0x3B, 0x01, 0x3D, 0x01, 0x3D, 0x01, 0x3F, 0x01,
+ 0x3F, 0x01, 0x41, 0x01, 0x41, 0x01, 0x43, 0x01, 0x43, 0x01, 0x45, 0x01, 0x45, 0x01, 0x47, 0x01,
+ 0x47, 0x01, 0x49, 0x01, 0x4A, 0x01, 0x4A, 0x01, 0x4C, 0x01, 0x4C, 0x01, 0x4E, 0x01, 0x4E, 0x01,
+ 0x50, 0x01, 0x50, 0x01, 0x52, 0x01, 0x52, 0x01, 0x54, 0x01, 0x54, 0x01, 0x56, 0x01, 0x56, 0x01,
+ 0x58, 0x01, 0x58, 0x01, 0x5A, 0x01, 0x5A, 0x01, 0x5C, 0x01, 0x5C, 0x01, 0x5E, 0x01, 0x5E, 0x01,
+ 0x60, 0x01, 0x60, 0x01, 0x62, 0x01, 0x62, 0x01, 0x64, 0x01, 0x64, 0x01, 0x66, 0x01, 0x66, 0x01,
+ 0x68, 0x01, 0x68, 0x01, 0x6A, 0x01, 0x6A, 0x01, 0x6C, 0x01, 0x6C, 0x01, 0x6E, 0x01, 0x6E, 0x01,
+ 0x70, 0x01, 0x70, 0x01, 0x72, 0x01, 0x72, 0x01, 0x74, 0x01, 0x74, 0x01, 0x76, 0x01, 0x76, 0x01,
+ 0x78, 0x01, 0x79, 0x01, 0x79, 0x01, 0x7B, 0x01, 0x7B, 0x01, 0x7D, 0x01, 0x7D, 0x01, 0x7F, 0x01,
+ 0x43, 0x02, 0x81, 0x01, 0x82, 0x01, 0x82, 0x01, 0x84, 0x01, 0x84, 0x01, 0x86, 0x01, 0x87, 0x01,
+ 0x87, 0x01, 0x89, 0x01, 0x8A, 0x01, 0x8B, 0x01, 0x8B, 0x01, 0x8D, 0x01, 0x8E, 0x01, 0x8F, 0x01,
+ 0x90, 0x01, 0x91, 0x01, 0x91, 0x01, 0x93, 0x01, 0x94, 0x01, 0xF6, 0x01, 0x96, 0x01, 0x97, 0x01,
+ 0x98, 0x01, 0x98, 0x01, 0x3D, 0x02, 0x9B, 0x01, 0x9C, 0x01, 0x9D, 0x01, 0x20, 0x02, 0x9F, 0x01,
+ 0xA0, 0x01, 0xA0, 0x01, 0xA2, 0x01, 0xA2, 0x01, 0xA4, 0x01, 0xA4, 0x01, 0xA6, 0x01, 0xA7, 0x01,
+ 0xA7, 0x01, 0xA9, 0x01, 0xAA, 0x01, 0xAB, 0x01, 0xAC, 0x01, 0xAC, 0x01, 0xAE, 0x01, 0xAF, 0x01,
+ 0xAF, 0x01, 0xB1, 0x01, 0xB2, 0x01, 0xB3, 0x01, 0xB3, 0x01, 0xB5, 0x01, 0xB5, 0x01, 0xB7, 0x01,
+ 0xB8, 0x01, 0xB8, 0x01, 0xBA, 0x01, 0xBB, 0x01, 0xBC, 0x01, 0xBC, 0x01, 0xBE, 0x01, 0xF7, 0x01,
+ 0xC0, 0x01, 0xC1, 0x01, 0xC2, 0x01, 0xC3, 0x01, 0xC4, 0x01, 0xC5, 0x01, 0xC4, 0x01, 0xC7, 0x01,
+ 0xC8, 0x01, 0xC7, 0x01, 0xCA, 0x01, 0xCB, 0x01, 0xCA, 0x01, 0xCD, 0x01, 0xCD, 0x01, 0xCF, 0x01,
+ 0xCF, 0x01, 0xD1, 0x01, 0xD1, 0x01, 0xD3, 0x01, 0xD3, 0x01, 0xD5, 0x01, 0xD5, 0x01, 0xD7, 0x01,
+ 0xD7, 0x01, 0xD9, 0x01, 0xD9, 0x01, 0xDB, 0x01, 0xDB, 0x01, 0x8E, 0x01, 0xDE, 0x01, 0xDE, 0x01,
+ 0xE0, 0x01, 0xE0, 0x01, 0xE2, 0x01, 0xE2, 0x01, 0xE4, 0x01, 0xE4, 0x01, 0xE6, 0x01, 0xE6, 0x01,
+ 0xE8, 0x01, 0xE8, 0x01, 0xEA, 0x01, 0xEA, 0x01, 0xEC, 0x01, 0xEC, 0x01, 0xEE, 0x01, 0xEE, 0x01,
+ 0xF0, 0x01, 0xF1, 0x01, 0xF2, 0x01, 0xF1, 0x01, 0xF4, 0x01, 0xF4, 0x01, 0xF6, 0x01, 0xF7, 0x01,
+ 0xF8, 0x01, 0xF8, 0x01, 0xFA, 0x01, 0xFA, 0x01, 0xFC, 0x01, 0xFC, 0x01, 0xFE, 0x01, 0xFE, 0x01,
+ 0x00, 0x02, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x04, 0x02, 0x04, 0x02, 0x06, 0x02, 0x06, 0x02,
+ 0x08, 0x02, 0x08, 0x02, 0x0A, 0x02, 0x0A, 0x02, 0x0C, 0x02, 0x0C, 0x02, 0x0E, 0x02, 0x0E, 0x02,
+ 0x10, 0x02, 0x10, 0x02, 0x12, 0x02, 0x12, 0x02, 0x14, 0x02, 0x14, 0x02, 0x16, 0x02, 0x16, 0x02,
+ 0x18, 0x02, 0x18, 0x02, 0x1A, 0x02, 0x1A, 0x02, 0x1C, 0x02, 0x1C, 0x02, 0x1E, 0x02, 0x1E, 0x02,
+ 0x20, 0x02, 0x21, 0x02, 0x22, 0x02, 0x22, 0x02, 0x24, 0x02, 0x24, 0x02, 0x26, 0x02, 0x26, 0x02,
+ 0x28, 0x02, 0x28, 0x02, 0x2A, 0x02, 0x2A, 0x02, 0x2C, 0x02, 0x2C, 0x02, 0x2E, 0x02, 0x2E, 0x02,
+ 0x30, 0x02, 0x30, 0x02, 0x32, 0x02, 0x32, 0x02, 0x34, 0x02, 0x35, 0x02, 0x36, 0x02, 0x37, 0x02,
+ 0x38, 0x02, 0x39, 0x02, 0x65, 0x2C, 0x3B, 0x02, 0x3B, 0x02, 0x3D, 0x02, 0x66, 0x2C, 0x3F, 0x02,
+ 0x40, 0x02, 0x41, 0x02, 0x41, 0x02, 0x43, 0x02, 0x44, 0x02, 0x45, 0x02, 0x46, 0x02, 0x46, 0x02,
+ 0x48, 0x02, 0x48, 0x02, 0x4A, 0x02, 0x4A, 0x02, 0x4C, 0x02, 0x4C, 0x02, 0x4E, 0x02, 0x4E, 0x02,
+ 0x50, 0x02, 0x51, 0x02, 0x52, 0x02, 0x81, 0x01, 0x86, 0x01, 0x55, 0x02, 0x89, 0x01, 0x8A, 0x01,
+ 0x58, 0x02, 0x8F, 0x01, 0x5A, 0x02, 0x90, 0x01, 0x5C, 0x02, 0x5D, 0x02, 0x5E, 0x02, 0x5F, 0x02,
+ 0x93, 0x01, 0x61, 0x02, 0x62, 0x02, 0x94, 0x01, 0x64, 0x02, 0x65, 0x02, 0x66, 0x02, 0x67, 0x02,
+ 0x97, 0x01, 0x96, 0x01, 0x6A, 0x02, 0x62, 0x2C, 0x6C, 0x02, 0x6D, 0x02, 0x6E, 0x02, 0x9C, 0x01,
+ 0x70, 0x02, 0x71, 0x02, 0x9D, 0x01, 0x73, 0x02, 0x74, 0x02, 0x9F, 0x01, 0x76, 0x02, 0x77, 0x02,
+ 0x78, 0x02, 0x79, 0x02, 0x7A, 0x02, 0x7B, 0x02, 0x7C, 0x02, 0x64, 0x2C, 0x7E, 0x02, 0x7F, 0x02,
+ 0xA6, 0x01, 0x81, 0x02, 0x82, 0x02, 0xA9, 0x01, 0x84, 0x02, 0x85, 0x02, 0x86, 0x02, 0x87, 0x02,
+ 0xAE, 0x01, 0x44, 0x02, 0xB1, 0x01, 0xB2, 0x01, 0x45, 0x02, 0x8D, 0x02, 0x8E, 0x02, 0x8F, 0x02,
+ 0x90, 0x02, 0x91, 0x02, 0xB7, 0x01, 0x93, 0x02, 0x94, 0x02, 0x95, 0x02, 0x96, 0x02, 0x97, 0x02,
+ 0x98, 0x02, 0x99, 0x02, 0x9A, 0x02, 0x9B, 0x02, 0x9C, 0x02, 0x9D, 0x02, 0x9E, 0x02, 0x9F, 0x02,
+ 0xA0, 0x02, 0xA1, 0x02, 0xA2, 0x02, 0xA3, 0x02, 0xA4, 0x02, 0xA5, 0x02, 0xA6, 0x02, 0xA7, 0x02,
+ 0xA8, 0x02, 0xA9, 0x02, 0xAA, 0x02, 0xAB, 0x02, 0xAC, 0x02, 0xAD, 0x02, 0xAE, 0x02, 0xAF, 0x02,
+ 0xB0, 0x02, 0xB1, 0x02, 0xB2, 0x02, 0xB3, 0x02, 0xB4, 0x02, 0xB5, 0x02, 0xB6, 0x02, 0xB7, 0x02,
+ 0xB8, 0x02, 0xB9, 0x02, 0xBA, 0x02, 0xBB, 0x02, 0xBC, 0x02, 0xBD, 0x02, 0xBE, 0x02, 0xBF, 0x02,
+ 0xC0, 0x02, 0xC1, 0x02, 0xC2, 0x02, 0xC3, 0x02, 0xC4, 0x02, 0xC5, 0x02, 0xC6, 0x02, 0xC7, 0x02,
+ 0xC8, 0x02, 0xC9, 0x02, 0xCA, 0x02, 0xCB, 0x02, 0xCC, 0x02, 0xCD, 0x02, 0xCE, 0x02, 0xCF, 0x02,
+ 0xD0, 0x02, 0xD1, 0x02, 0xD2, 0x02, 0xD3, 0x02, 0xD4, 0x02, 0xD5, 0x02, 0xD6, 0x02, 0xD7, 0x02,
+ 0xD8, 0x02, 0xD9, 0x02, 0xDA, 0x02, 0xDB, 0x02, 0xDC, 0x02, 0xDD, 0x02, 0xDE, 0x02, 0xDF, 0x02,
+ 0xE0, 0x02, 0xE1, 0x02, 0xE2, 0x02, 0xE3, 0x02, 0xE4, 0x02, 0xE5, 0x02, 0xE6, 0x02, 0xE7, 0x02,
+ 0xE8, 0x02, 0xE9, 0x02, 0xEA, 0x02, 0xEB, 0x02, 0xEC, 0x02, 0xED, 0x02, 0xEE, 0x02, 0xEF, 0x02,
+ 0xF0, 0x02, 0xF1, 0x02, 0xF2, 0x02, 0xF3, 0x02, 0xF4, 0x02, 0xF5, 0x02, 0xF6, 0x02, 0xF7, 0x02,
+ 0xF8, 0x02, 0xF9, 0x02, 0xFA, 0x02, 0xFB, 0x02, 0xFC, 0x02, 0xFD, 0x02, 0xFE, 0x02, 0xFF, 0x02,
+ 0x00, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x03, 0x04, 0x03, 0x05, 0x03, 0x06, 0x03, 0x07, 0x03,
+ 0x08, 0x03, 0x09, 0x03, 0x0A, 0x03, 0x0B, 0x03, 0x0C, 0x03, 0x0D, 0x03, 0x0E, 0x03, 0x0F, 0x03,
+ 0x10, 0x03, 0x11, 0x03, 0x12, 0x03, 0x13, 0x03, 0x14, 0x03, 0x15, 0x03, 0x16, 0x03, 0x17, 0x03,
+ 0x18, 0x03, 0x19, 0x03, 0x1A, 0x03, 0x1B, 0x03, 0x1C, 0x03, 0x1D, 0x03, 0x1E, 0x03, 0x1F, 0x03,
+ 0x20, 0x03, 0x21, 0x03, 0x22, 0x03, 0x23, 0x03, 0x24, 0x03, 0x25, 0x03, 0x26, 0x03, 0x27, 0x03,
+ 0x28, 0x03, 0x29, 0x03, 0x2A, 0x03, 0x2B, 0x03, 0x2C, 0x03, 0x2D, 0x03, 0x2E, 0x03, 0x2F, 0x03,
+ 0x30, 0x03, 0x31, 0x03, 0x32, 0x03, 0x33, 0x03, 0x34, 0x03, 0x35, 0x03, 0x36, 0x03, 0x37, 0x03,
+ 0x38, 0x03, 0x39, 0x03, 0x3A, 0x03, 0x3B, 0x03, 0x3C, 0x03, 0x3D, 0x03, 0x3E, 0x03, 0x3F, 0x03,
+ 0x40, 0x03, 0x41, 0x03, 0x42, 0x03, 0x43, 0x03, 0x44, 0x03, 0x45, 0x03, 0x46, 0x03, 0x47, 0x03,
+ 0x48, 0x03, 0x49, 0x03, 0x4A, 0x03, 0x4B, 0x03, 0x4C, 0x03, 0x4D, 0x03, 0x4E, 0x03, 0x4F, 0x03,
+ 0x50, 0x03, 0x51, 0x03, 0x52, 0x03, 0x53, 0x03, 0x54, 0x03, 0x55, 0x03, 0x56, 0x03, 0x57, 0x03,
+ 0x58, 0x03, 0x59, 0x03, 0x5A, 0x03, 0x5B, 0x03, 0x5C, 0x03, 0x5D, 0x03, 0x5E, 0x03, 0x5F, 0x03,
+ 0x60, 0x03, 0x61, 0x03, 0x62, 0x03, 0x63, 0x03, 0x64, 0x03, 0x65, 0x03, 0x66, 0x03, 0x67, 0x03,
+ 0x68, 0x03, 0x69, 0x03, 0x6A, 0x03, 0x6B, 0x03, 0x6C, 0x03, 0x6D, 0x03, 0x6E, 0x03, 0x6F, 0x03,
+ 0x70, 0x03, 0x71, 0x03, 0x72, 0x03, 0x73, 0x03, 0x74, 0x03, 0x75, 0x03, 0x76, 0x03, 0x77, 0x03,
+ 0x78, 0x03, 0x79, 0x03, 0x7A, 0x03, 0xFD, 0x03, 0xFE, 0x03, 0xFF, 0x03, 0x7E, 0x03, 0x7F, 0x03,
+ 0x80, 0x03, 0x81, 0x03, 0x82, 0x03, 0x83, 0x03, 0x84, 0x03, 0x85, 0x03, 0x86, 0x03, 0x87, 0x03,
+ 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03, 0x8B, 0x03, 0x8C, 0x03, 0x8D, 0x03, 0x8E, 0x03, 0x8F, 0x03,
+ 0x90, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03, 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03,
+ 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03, 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03,
+ 0xA0, 0x03, 0xA1, 0x03, 0xA2, 0x03, 0xA3, 0x03, 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03,
+ 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03, 0x86, 0x03, 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03,
+ 0xB0, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03, 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03,
+ 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03, 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03,
+ 0xA0, 0x03, 0xA1, 0x03, 0xA3, 0x03, 0xA3, 0x03, 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03,
+ 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03, 0x8C, 0x03, 0x8E, 0x03, 0x8F, 0x03, 0xCF, 0x03,
+ 0xD0, 0x03, 0xD1, 0x03, 0xD2, 0x03, 0xD3, 0x03, 0xD4, 0x03, 0xD5, 0x03, 0xD6, 0x03, 0xD7, 0x03,
+ 0xD8, 0x03, 0xD8, 0x03, 0xDA, 0x03, 0xDA, 0x03, 0xDC, 0x03, 0xDC, 0x03, 0xDE, 0x03, 0xDE, 0x03,
+ 0xE0, 0x03, 0xE0, 0x03, 0xE2, 0x03, 0xE2, 0x03, 0xE4, 0x03, 0xE4, 0x03, 0xE6, 0x03, 0xE6, 0x03,
+ 0xE8, 0x03, 0xE8, 0x03, 0xEA, 0x03, 0xEA, 0x03, 0xEC, 0x03, 0xEC, 0x03, 0xEE, 0x03, 0xEE, 0x03,
+ 0xF0, 0x03, 0xF1, 0x03, 0xF9, 0x03, 0xF3, 0x03, 0xF4, 0x03, 0xF5, 0x03, 0xF6, 0x03, 0xF7, 0x03,
+ 0xF7, 0x03, 0xF9, 0x03, 0xFA, 0x03, 0xFA, 0x03, 0xFC, 0x03, 0xFD, 0x03, 0xFE, 0x03, 0xFF, 0x03,
+ 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04, 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04,
+ 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04, 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04,
+ 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04, 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04,
+ 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04, 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04,
+ 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04, 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04,
+ 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04, 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04,
+ 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04, 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04,
+ 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04, 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04,
+ 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04, 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04,
+ 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04, 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04,
+ 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04, 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04,
+ 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04, 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04,
+ 0x60, 0x04, 0x60, 0x04, 0x62, 0x04, 0x62, 0x04, 0x64, 0x04, 0x64, 0x04, 0x66, 0x04, 0x66, 0x04,
+ 0x68, 0x04, 0x68, 0x04, 0x6A, 0x04, 0x6A, 0x04, 0x6C, 0x04, 0x6C, 0x04, 0x6E, 0x04, 0x6E, 0x04,
+ 0x70, 0x04, 0x70, 0x04, 0x72, 0x04, 0x72, 0x04, 0x74, 0x04, 0x74, 0x04, 0x76, 0x04, 0x76, 0x04,
+ 0x78, 0x04, 0x78, 0x04, 0x7A, 0x04, 0x7A, 0x04, 0x7C, 0x04, 0x7C, 0x04, 0x7E, 0x04, 0x7E, 0x04,
+ 0x80, 0x04, 0x80, 0x04, 0x82, 0x04, 0x83, 0x04, 0x84, 0x04, 0x85, 0x04, 0x86, 0x04, 0x87, 0x04,
+ 0x88, 0x04, 0x89, 0x04, 0x8A, 0x04, 0x8A, 0x04, 0x8C, 0x04, 0x8C, 0x04, 0x8E, 0x04, 0x8E, 0x04,
+ 0x90, 0x04, 0x90, 0x04, 0x92, 0x04, 0x92, 0x04, 0x94, 0x04, 0x94, 0x04, 0x96, 0x04, 0x96, 0x04,
+ 0x98, 0x04, 0x98, 0x04, 0x9A, 0x04, 0x9A, 0x04, 0x9C, 0x04, 0x9C, 0x04, 0x9E, 0x04, 0x9E, 0x04,
+ 0xA0, 0x04, 0xA0, 0x04, 0xA2, 0x04, 0xA2, 0x04, 0xA4, 0x04, 0xA4, 0x04, 0xA6, 0x04, 0xA6, 0x04,
+ 0xA8, 0x04, 0xA8, 0x04, 0xAA, 0x04, 0xAA, 0x04, 0xAC, 0x04, 0xAC, 0x04, 0xAE, 0x04, 0xAE, 0x04,
+ 0xB0, 0x04, 0xB0, 0x04, 0xB2, 0x04, 0xB2, 0x04, 0xB4, 0x04, 0xB4, 0x04, 0xB6, 0x04, 0xB6, 0x04,
+ 0xB8, 0x04, 0xB8, 0x04, 0xBA, 0x04, 0xBA, 0x04, 0xBC, 0x04, 0xBC, 0x04, 0xBE, 0x04, 0xBE, 0x04,
+ 0xC0, 0x04, 0xC1, 0x04, 0xC1, 0x04, 0xC3, 0x04, 0xC3, 0x04, 0xC5, 0x04, 0xC5, 0x04, 0xC7, 0x04,
+ 0xC7, 0x04, 0xC9, 0x04, 0xC9, 0x04, 0xCB, 0x04, 0xCB, 0x04, 0xCD, 0x04, 0xCD, 0x04, 0xC0, 0x04,
+ 0xD0, 0x04, 0xD0, 0x04, 0xD2, 0x04, 0xD2, 0x04, 0xD4, 0x04, 0xD4, 0x04, 0xD6, 0x04, 0xD6, 0x04,
+ 0xD8, 0x04, 0xD8, 0x04, 0xDA, 0x04, 0xDA, 0x04, 0xDC, 0x04, 0xDC, 0x04, 0xDE, 0x04, 0xDE, 0x04,
+ 0xE0, 0x04, 0xE0, 0x04, 0xE2, 0x04, 0xE2, 0x04, 0xE4, 0x04, 0xE4, 0x04, 0xE6, 0x04, 0xE6, 0x04,
+ 0xE8, 0x04, 0xE8, 0x04, 0xEA, 0x04, 0xEA, 0x04, 0xEC, 0x04, 0xEC, 0x04, 0xEE, 0x04, 0xEE, 0x04,
+ 0xF0, 0x04, 0xF0, 0x04, 0xF2, 0x04, 0xF2, 0x04, 0xF4, 0x04, 0xF4, 0x04, 0xF6, 0x04, 0xF6, 0x04,
+ 0xF8, 0x04, 0xF8, 0x04, 0xFA, 0x04, 0xFA, 0x04, 0xFC, 0x04, 0xFC, 0x04, 0xFE, 0x04, 0xFE, 0x04,
+ 0x00, 0x05, 0x00, 0x05, 0x02, 0x05, 0x02, 0x05, 0x04, 0x05, 0x04, 0x05, 0x06, 0x05, 0x06, 0x05,
+ 0x08, 0x05, 0x08, 0x05, 0x0A, 0x05, 0x0A, 0x05, 0x0C, 0x05, 0x0C, 0x05, 0x0E, 0x05, 0x0E, 0x05,
+ 0x10, 0x05, 0x10, 0x05, 0x12, 0x05, 0x12, 0x05, 0x14, 0x05, 0x15, 0x05, 0x16, 0x05, 0x17, 0x05,
+ 0x18, 0x05, 0x19, 0x05, 0x1A, 0x05, 0x1B, 0x05, 0x1C, 0x05, 0x1D, 0x05, 0x1E, 0x05, 0x1F, 0x05,
+ 0x20, 0x05, 0x21, 0x05, 0x22, 0x05, 0x23, 0x05, 0x24, 0x05, 0x25, 0x05, 0x26, 0x05, 0x27, 0x05,
+ 0x28, 0x05, 0x29, 0x05, 0x2A, 0x05, 0x2B, 0x05, 0x2C, 0x05, 0x2D, 0x05, 0x2E, 0x05, 0x2F, 0x05,
+ 0x30, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05, 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05,
+ 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05, 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05,
+ 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05, 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05,
+ 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05, 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05,
+ 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05, 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0x57, 0x05,
+ 0x58, 0x05, 0x59, 0x05, 0x5A, 0x05, 0x5B, 0x05, 0x5C, 0x05, 0x5D, 0x05, 0x5E, 0x05, 0x5F, 0x05,
+ 0x60, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05, 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05,
+ 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05, 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05,
+ 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05, 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05,
+ 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05, 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05,
+ 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05, 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0xFF, 0xFF,
+ 0xF6, 0x17, 0x63, 0x2C, 0x7E, 0x1D, 0x7F, 0x1D, 0x80, 0x1D, 0x81, 0x1D, 0x82, 0x1D, 0x83, 0x1D,
+ 0x84, 0x1D, 0x85, 0x1D, 0x86, 0x1D, 0x87, 0x1D, 0x88, 0x1D, 0x89, 0x1D, 0x8A, 0x1D, 0x8B, 0x1D,
+ 0x8C, 0x1D, 0x8D, 0x1D, 0x8E, 0x1D, 0x8F, 0x1D, 0x90, 0x1D, 0x91, 0x1D, 0x92, 0x1D, 0x93, 0x1D,
+ 0x94, 0x1D, 0x95, 0x1D, 0x96, 0x1D, 0x97, 0x1D, 0x98, 0x1D, 0x99, 0x1D, 0x9A, 0x1D, 0x9B, 0x1D,
+ 0x9C, 0x1D, 0x9D, 0x1D, 0x9E, 0x1D, 0x9F, 0x1D, 0xA0, 0x1D, 0xA1, 0x1D, 0xA2, 0x1D, 0xA3, 0x1D,
+ 0xA4, 0x1D, 0xA5, 0x1D, 0xA6, 0x1D, 0xA7, 0x1D, 0xA8, 0x1D, 0xA9, 0x1D, 0xAA, 0x1D, 0xAB, 0x1D,
+ 0xAC, 0x1D, 0xAD, 0x1D, 0xAE, 0x1D, 0xAF, 0x1D, 0xB0, 0x1D, 0xB1, 0x1D, 0xB2, 0x1D, 0xB3, 0x1D,
+ 0xB4, 0x1D, 0xB5, 0x1D, 0xB6, 0x1D, 0xB7, 0x1D, 0xB8, 0x1D, 0xB9, 0x1D, 0xBA, 0x1D, 0xBB, 0x1D,
+ 0xBC, 0x1D, 0xBD, 0x1D, 0xBE, 0x1D, 0xBF, 0x1D, 0xC0, 0x1D, 0xC1, 0x1D, 0xC2, 0x1D, 0xC3, 0x1D,
+ 0xC4, 0x1D, 0xC5, 0x1D, 0xC6, 0x1D, 0xC7, 0x1D, 0xC8, 0x1D, 0xC9, 0x1D, 0xCA, 0x1D, 0xCB, 0x1D,
+ 0xCC, 0x1D, 0xCD, 0x1D, 0xCE, 0x1D, 0xCF, 0x1D, 0xD0, 0x1D, 0xD1, 0x1D, 0xD2, 0x1D, 0xD3, 0x1D,
+ 0xD4, 0x1D, 0xD5, 0x1D, 0xD6, 0x1D, 0xD7, 0x1D, 0xD8, 0x1D, 0xD9, 0x1D, 0xDA, 0x1D, 0xDB, 0x1D,
+ 0xDC, 0x1D, 0xDD, 0x1D, 0xDE, 0x1D, 0xDF, 0x1D, 0xE0, 0x1D, 0xE1, 0x1D, 0xE2, 0x1D, 0xE3, 0x1D,
+ 0xE4, 0x1D, 0xE5, 0x1D, 0xE6, 0x1D, 0xE7, 0x1D, 0xE8, 0x1D, 0xE9, 0x1D, 0xEA, 0x1D, 0xEB, 0x1D,
+ 0xEC, 0x1D, 0xED, 0x1D, 0xEE, 0x1D, 0xEF, 0x1D, 0xF0, 0x1D, 0xF1, 0x1D, 0xF2, 0x1D, 0xF3, 0x1D,
+ 0xF4, 0x1D, 0xF5, 0x1D, 0xF6, 0x1D, 0xF7, 0x1D, 0xF8, 0x1D, 0xF9, 0x1D, 0xFA, 0x1D, 0xFB, 0x1D,
+ 0xFC, 0x1D, 0xFD, 0x1D, 0xFE, 0x1D, 0xFF, 0x1D, 0x00, 0x1E, 0x00, 0x1E, 0x02, 0x1E, 0x02, 0x1E,
+ 0x04, 0x1E, 0x04, 0x1E, 0x06, 0x1E, 0x06, 0x1E, 0x08, 0x1E, 0x08, 0x1E, 0x0A, 0x1E, 0x0A, 0x1E,
+ 0x0C, 0x1E, 0x0C, 0x1E, 0x0E, 0x1E, 0x0E, 0x1E, 0x10, 0x1E, 0x10, 0x1E, 0x12, 0x1E, 0x12, 0x1E,
+ 0x14, 0x1E, 0x14, 0x1E, 0x16, 0x1E, 0x16, 0x1E, 0x18, 0x1E, 0x18, 0x1E, 0x1A, 0x1E, 0x1A, 0x1E,
+ 0x1C, 0x1E, 0x1C, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x20, 0x1E, 0x20, 0x1E, 0x22, 0x1E, 0x22, 0x1E,
+ 0x24, 0x1E, 0x24, 0x1E, 0x26, 0x1E, 0x26, 0x1E, 0x28, 0x1E, 0x28, 0x1E, 0x2A, 0x1E, 0x2A, 0x1E,
+ 0x2C, 0x1E, 0x2C, 0x1E, 0x2E, 0x1E, 0x2E, 0x1E, 0x30, 0x1E, 0x30, 0x1E, 0x32, 0x1E, 0x32, 0x1E,
+ 0x34, 0x1E, 0x34, 0x1E, 0x36, 0x1E, 0x36, 0x1E, 0x38, 0x1E, 0x38, 0x1E, 0x3A, 0x1E, 0x3A, 0x1E,
+ 0x3C, 0x1E, 0x3C, 0x1E, 0x3E, 0x1E, 0x3E, 0x1E, 0x40, 0x1E, 0x40, 0x1E, 0x42, 0x1E, 0x42, 0x1E,
+ 0x44, 0x1E, 0x44, 0x1E, 0x46, 0x1E, 0x46, 0x1E, 0x48, 0x1E, 0x48, 0x1E, 0x4A, 0x1E, 0x4A, 0x1E,
+ 0x4C, 0x1E, 0x4C, 0x1E, 0x4E, 0x1E, 0x4E, 0x1E, 0x50, 0x1E, 0x50, 0x1E, 0x52, 0x1E, 0x52, 0x1E,
+ 0x54, 0x1E, 0x54, 0x1E, 0x56, 0x1E, 0x56, 0x1E, 0x58, 0x1E, 0x58, 0x1E, 0x5A, 0x1E, 0x5A, 0x1E,
+ 0x5C, 0x1E, 0x5C, 0x1E, 0x5E, 0x1E, 0x5E, 0x1E, 0x60, 0x1E, 0x60, 0x1E, 0x62, 0x1E, 0x62, 0x1E,
+ 0x64, 0x1E, 0x64, 0x1E, 0x66, 0x1E, 0x66, 0x1E, 0x68, 0x1E, 0x68, 0x1E, 0x6A, 0x1E, 0x6A, 0x1E,
+ 0x6C, 0x1E, 0x6C, 0x1E, 0x6E, 0x1E, 0x6E, 0x1E, 0x70, 0x1E, 0x70, 0x1E, 0x72, 0x1E, 0x72, 0x1E,
+ 0x74, 0x1E, 0x74, 0x1E, 0x76, 0x1E, 0x76, 0x1E, 0x78, 0x1E, 0x78, 0x1E, 0x7A, 0x1E, 0x7A, 0x1E,
+ 0x7C, 0x1E, 0x7C, 0x1E, 0x7E, 0x1E, 0x7E, 0x1E, 0x80, 0x1E, 0x80, 0x1E, 0x82, 0x1E, 0x82, 0x1E,
+ 0x84, 0x1E, 0x84, 0x1E, 0x86, 0x1E, 0x86, 0x1E, 0x88, 0x1E, 0x88, 0x1E, 0x8A, 0x1E, 0x8A, 0x1E,
+ 0x8C, 0x1E, 0x8C, 0x1E, 0x8E, 0x1E, 0x8E, 0x1E, 0x90, 0x1E, 0x90, 0x1E, 0x92, 0x1E, 0x92, 0x1E,
+ 0x94, 0x1E, 0x94, 0x1E, 0x96, 0x1E, 0x97, 0x1E, 0x98, 0x1E, 0x99, 0x1E, 0x9A, 0x1E, 0x9B, 0x1E,
+ 0x9C, 0x1E, 0x9D, 0x1E, 0x9E, 0x1E, 0x9F, 0x1E, 0xA0, 0x1E, 0xA0, 0x1E, 0xA2, 0x1E, 0xA2, 0x1E,
+ 0xA4, 0x1E, 0xA4, 0x1E, 0xA6, 0x1E, 0xA6, 0x1E, 0xA8, 0x1E, 0xA8, 0x1E, 0xAA, 0x1E, 0xAA, 0x1E,
+ 0xAC, 0x1E, 0xAC, 0x1E, 0xAE, 0x1E, 0xAE, 0x1E, 0xB0, 0x1E, 0xB0, 0x1E, 0xB2, 0x1E, 0xB2, 0x1E,
+ 0xB4, 0x1E, 0xB4, 0x1E, 0xB6, 0x1E, 0xB6, 0x1E, 0xB8, 0x1E, 0xB8, 0x1E, 0xBA, 0x1E, 0xBA, 0x1E,
+ 0xBC, 0x1E, 0xBC, 0x1E, 0xBE, 0x1E, 0xBE, 0x1E, 0xC0, 0x1E, 0xC0, 0x1E, 0xC2, 0x1E, 0xC2, 0x1E,
+ 0xC4, 0x1E, 0xC4, 0x1E, 0xC6, 0x1E, 0xC6, 0x1E, 0xC8, 0x1E, 0xC8, 0x1E, 0xCA, 0x1E, 0xCA, 0x1E,
+ 0xCC, 0x1E, 0xCC, 0x1E, 0xCE, 0x1E, 0xCE, 0x1E, 0xD0, 0x1E, 0xD0, 0x1E, 0xD2, 0x1E, 0xD2, 0x1E,
+ 0xD4, 0x1E, 0xD4, 0x1E, 0xD6, 0x1E, 0xD6, 0x1E, 0xD8, 0x1E, 0xD8, 0x1E, 0xDA, 0x1E, 0xDA, 0x1E,
+ 0xDC, 0x1E, 0xDC, 0x1E, 0xDE, 0x1E, 0xDE, 0x1E, 0xE0, 0x1E, 0xE0, 0x1E, 0xE2, 0x1E, 0xE2, 0x1E,
+ 0xE4, 0x1E, 0xE4, 0x1E, 0xE6, 0x1E, 0xE6, 0x1E, 0xE8, 0x1E, 0xE8, 0x1E, 0xEA, 0x1E, 0xEA, 0x1E,
+ 0xEC, 0x1E, 0xEC, 0x1E, 0xEE, 0x1E, 0xEE, 0x1E, 0xF0, 0x1E, 0xF0, 0x1E, 0xF2, 0x1E, 0xF2, 0x1E,
+ 0xF4, 0x1E, 0xF4, 0x1E, 0xF6, 0x1E, 0xF6, 0x1E, 0xF8, 0x1E, 0xF8, 0x1E, 0xFA, 0x1E, 0xFB, 0x1E,
+ 0xFC, 0x1E, 0xFD, 0x1E, 0xFE, 0x1E, 0xFF, 0x1E, 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F,
+ 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F, 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F,
+ 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F, 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F,
+ 0x1C, 0x1F, 0x1D, 0x1F, 0x16, 0x1F, 0x17, 0x1F, 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F,
+ 0x1C, 0x1F, 0x1D, 0x1F, 0x1E, 0x1F, 0x1F, 0x1F, 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F,
+ 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F, 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F,
+ 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F, 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F,
+ 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F, 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F,
+ 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F, 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F,
+ 0x4C, 0x1F, 0x4D, 0x1F, 0x46, 0x1F, 0x47, 0x1F, 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F,
+ 0x4C, 0x1F, 0x4D, 0x1F, 0x4E, 0x1F, 0x4F, 0x1F, 0x50, 0x1F, 0x59, 0x1F, 0x52, 0x1F, 0x5B, 0x1F,
+ 0x54, 0x1F, 0x5D, 0x1F, 0x56, 0x1F, 0x5F, 0x1F, 0x58, 0x1F, 0x59, 0x1F, 0x5A, 0x1F, 0x5B, 0x1F,
+ 0x5C, 0x1F, 0x5D, 0x1F, 0x5E, 0x1F, 0x5F, 0x1F, 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F,
+ 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F, 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F,
+ 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F, 0xBA, 0x1F, 0xBB, 0x1F, 0xC8, 0x1F, 0xC9, 0x1F,
+ 0xCA, 0x1F, 0xCB, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F, 0xF8, 0x1F, 0xF9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F,
+ 0xFA, 0x1F, 0xFB, 0x1F, 0x7E, 0x1F, 0x7F, 0x1F, 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F,
+ 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F, 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F,
+ 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F, 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F,
+ 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F, 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F,
+ 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F, 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F,
+ 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F, 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F,
+ 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F, 0xB8, 0x1F, 0xB9, 0x1F, 0xB2, 0x1F, 0xBC, 0x1F,
+ 0xB4, 0x1F, 0xB5, 0x1F, 0xB6, 0x1F, 0xB7, 0x1F, 0xB8, 0x1F, 0xB9, 0x1F, 0xBA, 0x1F, 0xBB, 0x1F,
+ 0xBC, 0x1F, 0xBD, 0x1F, 0xBE, 0x1F, 0xBF, 0x1F, 0xC0, 0x1F, 0xC1, 0x1F, 0xC2, 0x1F, 0xC3, 0x1F,
+ 0xC4, 0x1F, 0xC5, 0x1F, 0xC6, 0x1F, 0xC7, 0x1F, 0xC8, 0x1F, 0xC9, 0x1F, 0xCA, 0x1F, 0xCB, 0x1F,
+ 0xC3, 0x1F, 0xCD, 0x1F, 0xCE, 0x1F, 0xCF, 0x1F, 0xD8, 0x1F, 0xD9, 0x1F, 0xD2, 0x1F, 0xD3, 0x1F,
+ 0xD4, 0x1F, 0xD5, 0x1F, 0xD6, 0x1F, 0xD7, 0x1F, 0xD8, 0x1F, 0xD9, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F,
+ 0xDC, 0x1F, 0xDD, 0x1F, 0xDE, 0x1F, 0xDF, 0x1F, 0xE8, 0x1F, 0xE9, 0x1F, 0xE2, 0x1F, 0xE3, 0x1F,
+ 0xE4, 0x1F, 0xEC, 0x1F, 0xE6, 0x1F, 0xE7, 0x1F, 0xE8, 0x1F, 0xE9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F,
+ 0xEC, 0x1F, 0xED, 0x1F, 0xEE, 0x1F, 0xEF, 0x1F, 0xF0, 0x1F, 0xF1, 0x1F, 0xF2, 0x1F, 0xF3, 0x1F,
+ 0xF4, 0x1F, 0xF5, 0x1F, 0xF6, 0x1F, 0xF7, 0x1F, 0xF8, 0x1F, 0xF9, 0x1F, 0xFA, 0x1F, 0xFB, 0x1F,
+ 0xF3, 0x1F, 0xFD, 0x1F, 0xFE, 0x1F, 0xFF, 0x1F, 0x00, 0x20, 0x01, 0x20, 0x02, 0x20, 0x03, 0x20,
+ 0x04, 0x20, 0x05, 0x20, 0x06, 0x20, 0x07, 0x20, 0x08, 0x20, 0x09, 0x20, 0x0A, 0x20, 0x0B, 0x20,
+ 0x0C, 0x20, 0x0D, 0x20, 0x0E, 0x20, 0x0F, 0x20, 0x10, 0x20, 0x11, 0x20, 0x12, 0x20, 0x13, 0x20,
+ 0x14, 0x20, 0x15, 0x20, 0x16, 0x20, 0x17, 0x20, 0x18, 0x20, 0x19, 0x20, 0x1A, 0x20, 0x1B, 0x20,
+ 0x1C, 0x20, 0x1D, 0x20, 0x1E, 0x20, 0x1F, 0x20, 0x20, 0x20, 0x21, 0x20, 0x22, 0x20, 0x23, 0x20,
+ 0x24, 0x20, 0x25, 0x20, 0x26, 0x20, 0x27, 0x20, 0x28, 0x20, 0x29, 0x20, 0x2A, 0x20, 0x2B, 0x20,
+ 0x2C, 0x20, 0x2D, 0x20, 0x2E, 0x20, 0x2F, 0x20, 0x30, 0x20, 0x31, 0x20, 0x32, 0x20, 0x33, 0x20,
+ 0x34, 0x20, 0x35, 0x20, 0x36, 0x20, 0x37, 0x20, 0x38, 0x20, 0x39, 0x20, 0x3A, 0x20, 0x3B, 0x20,
+ 0x3C, 0x20, 0x3D, 0x20, 0x3E, 0x20, 0x3F, 0x20, 0x40, 0x20, 0x41, 0x20, 0x42, 0x20, 0x43, 0x20,
+ 0x44, 0x20, 0x45, 0x20, 0x46, 0x20, 0x47, 0x20, 0x48, 0x20, 0x49, 0x20, 0x4A, 0x20, 0x4B, 0x20,
+ 0x4C, 0x20, 0x4D, 0x20, 0x4E, 0x20, 0x4F, 0x20, 0x50, 0x20, 0x51, 0x20, 0x52, 0x20, 0x53, 0x20,
+ 0x54, 0x20, 0x55, 0x20, 0x56, 0x20, 0x57, 0x20, 0x58, 0x20, 0x59, 0x20, 0x5A, 0x20, 0x5B, 0x20,
+ 0x5C, 0x20, 0x5D, 0x20, 0x5E, 0x20, 0x5F, 0x20, 0x60, 0x20, 0x61, 0x20, 0x62, 0x20, 0x63, 0x20,
+ 0x64, 0x20, 0x65, 0x20, 0x66, 0x20, 0x67, 0x20, 0x68, 0x20, 0x69, 0x20, 0x6A, 0x20, 0x6B, 0x20,
+ 0x6C, 0x20, 0x6D, 0x20, 0x6E, 0x20, 0x6F, 0x20, 0x70, 0x20, 0x71, 0x20, 0x72, 0x20, 0x73, 0x20,
+ 0x74, 0x20, 0x75, 0x20, 0x76, 0x20, 0x77, 0x20, 0x78, 0x20, 0x79, 0x20, 0x7A, 0x20, 0x7B, 0x20,
+ 0x7C, 0x20, 0x7D, 0x20, 0x7E, 0x20, 0x7F, 0x20, 0x80, 0x20, 0x81, 0x20, 0x82, 0x20, 0x83, 0x20,
+ 0x84, 0x20, 0x85, 0x20, 0x86, 0x20, 0x87, 0x20, 0x88, 0x20, 0x89, 0x20, 0x8A, 0x20, 0x8B, 0x20,
+ 0x8C, 0x20, 0x8D, 0x20, 0x8E, 0x20, 0x8F, 0x20, 0x90, 0x20, 0x91, 0x20, 0x92, 0x20, 0x93, 0x20,
+ 0x94, 0x20, 0x95, 0x20, 0x96, 0x20, 0x97, 0x20, 0x98, 0x20, 0x99, 0x20, 0x9A, 0x20, 0x9B, 0x20,
+ 0x9C, 0x20, 0x9D, 0x20, 0x9E, 0x20, 0x9F, 0x20, 0xA0, 0x20, 0xA1, 0x20, 0xA2, 0x20, 0xA3, 0x20,
+ 0xA4, 0x20, 0xA5, 0x20, 0xA6, 0x20, 0xA7, 0x20, 0xA8, 0x20, 0xA9, 0x20, 0xAA, 0x20, 0xAB, 0x20,
+ 0xAC, 0x20, 0xAD, 0x20, 0xAE, 0x20, 0xAF, 0x20, 0xB0, 0x20, 0xB1, 0x20, 0xB2, 0x20, 0xB3, 0x20,
+ 0xB4, 0x20, 0xB5, 0x20, 0xB6, 0x20, 0xB7, 0x20, 0xB8, 0x20, 0xB9, 0x20, 0xBA, 0x20, 0xBB, 0x20,
+ 0xBC, 0x20, 0xBD, 0x20, 0xBE, 0x20, 0xBF, 0x20, 0xC0, 0x20, 0xC1, 0x20, 0xC2, 0x20, 0xC3, 0x20,
+ 0xC4, 0x20, 0xC5, 0x20, 0xC6, 0x20, 0xC7, 0x20, 0xC8, 0x20, 0xC9, 0x20, 0xCA, 0x20, 0xCB, 0x20,
+ 0xCC, 0x20, 0xCD, 0x20, 0xCE, 0x20, 0xCF, 0x20, 0xD0, 0x20, 0xD1, 0x20, 0xD2, 0x20, 0xD3, 0x20,
+ 0xD4, 0x20, 0xD5, 0x20, 0xD6, 0x20, 0xD7, 0x20, 0xD8, 0x20, 0xD9, 0x20, 0xDA, 0x20, 0xDB, 0x20,
+ 0xDC, 0x20, 0xDD, 0x20, 0xDE, 0x20, 0xDF, 0x20, 0xE0, 0x20, 0xE1, 0x20, 0xE2, 0x20, 0xE3, 0x20,
+ 0xE4, 0x20, 0xE5, 0x20, 0xE6, 0x20, 0xE7, 0x20, 0xE8, 0x20, 0xE9, 0x20, 0xEA, 0x20, 0xEB, 0x20,
+ 0xEC, 0x20, 0xED, 0x20, 0xEE, 0x20, 0xEF, 0x20, 0xF0, 0x20, 0xF1, 0x20, 0xF2, 0x20, 0xF3, 0x20,
+ 0xF4, 0x20, 0xF5, 0x20, 0xF6, 0x20, 0xF7, 0x20, 0xF8, 0x20, 0xF9, 0x20, 0xFA, 0x20, 0xFB, 0x20,
+ 0xFC, 0x20, 0xFD, 0x20, 0xFE, 0x20, 0xFF, 0x20, 0x00, 0x21, 0x01, 0x21, 0x02, 0x21, 0x03, 0x21,
+ 0x04, 0x21, 0x05, 0x21, 0x06, 0x21, 0x07, 0x21, 0x08, 0x21, 0x09, 0x21, 0x0A, 0x21, 0x0B, 0x21,
+ 0x0C, 0x21, 0x0D, 0x21, 0x0E, 0x21, 0x0F, 0x21, 0x10, 0x21, 0x11, 0x21, 0x12, 0x21, 0x13, 0x21,
+ 0x14, 0x21, 0x15, 0x21, 0x16, 0x21, 0x17, 0x21, 0x18, 0x21, 0x19, 0x21, 0x1A, 0x21, 0x1B, 0x21,
+ 0x1C, 0x21, 0x1D, 0x21, 0x1E, 0x21, 0x1F, 0x21, 0x20, 0x21, 0x21, 0x21, 0x22, 0x21, 0x23, 0x21,
+ 0x24, 0x21, 0x25, 0x21, 0x26, 0x21, 0x27, 0x21, 0x28, 0x21, 0x29, 0x21, 0x2A, 0x21, 0x2B, 0x21,
+ 0x2C, 0x21, 0x2D, 0x21, 0x2E, 0x21, 0x2F, 0x21, 0x30, 0x21, 0x31, 0x21, 0x32, 0x21, 0x33, 0x21,
+ 0x34, 0x21, 0x35, 0x21, 0x36, 0x21, 0x37, 0x21, 0x38, 0x21, 0x39, 0x21, 0x3A, 0x21, 0x3B, 0x21,
+ 0x3C, 0x21, 0x3D, 0x21, 0x3E, 0x21, 0x3F, 0x21, 0x40, 0x21, 0x41, 0x21, 0x42, 0x21, 0x43, 0x21,
+ 0x44, 0x21, 0x45, 0x21, 0x46, 0x21, 0x47, 0x21, 0x48, 0x21, 0x49, 0x21, 0x4A, 0x21, 0x4B, 0x21,
+ 0x4C, 0x21, 0x4D, 0x21, 0x32, 0x21, 0x4F, 0x21, 0x50, 0x21, 0x51, 0x21, 0x52, 0x21, 0x53, 0x21,
+ 0x54, 0x21, 0x55, 0x21, 0x56, 0x21, 0x57, 0x21, 0x58, 0x21, 0x59, 0x21, 0x5A, 0x21, 0x5B, 0x21,
+ 0x5C, 0x21, 0x5D, 0x21, 0x5E, 0x21, 0x5F, 0x21, 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21,
+ 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21, 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21,
+ 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21, 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21,
+ 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21, 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21,
+ 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21, 0x80, 0x21, 0x81, 0x21, 0x82, 0x21, 0x83, 0x21,
+ 0x83, 0x21, 0xFF, 0xFF, 0x4B, 0x03, 0xB6, 0x24, 0xB7, 0x24, 0xB8, 0x24, 0xB9, 0x24, 0xBA, 0x24,
+ 0xBB, 0x24, 0xBC, 0x24, 0xBD, 0x24, 0xBE, 0x24, 0xBF, 0x24, 0xC0, 0x24, 0xC1, 0x24, 0xC2, 0x24,
+ 0xC3, 0x24, 0xC4, 0x24, 0xC5, 0x24, 0xC6, 0x24, 0xC7, 0x24, 0xC8, 0x24, 0xC9, 0x24, 0xCA, 0x24,
+ 0xCB, 0x24, 0xCC, 0x24, 0xCD, 0x24, 0xCE, 0x24, 0xCF, 0x24, 0xFF, 0xFF, 0x46, 0x07, 0x00, 0x2C,
+ 0x01, 0x2C, 0x02, 0x2C, 0x03, 0x2C, 0x04, 0x2C, 0x05, 0x2C, 0x06, 0x2C, 0x07, 0x2C, 0x08, 0x2C,
+ 0x09, 0x2C, 0x0A, 0x2C, 0x0B, 0x2C, 0x0C, 0x2C, 0x0D, 0x2C, 0x0E, 0x2C, 0x0F, 0x2C, 0x10, 0x2C,
+ 0x11, 0x2C, 0x12, 0x2C, 0x13, 0x2C, 0x14, 0x2C, 0x15, 0x2C, 0x16, 0x2C, 0x17, 0x2C, 0x18, 0x2C,
+ 0x19, 0x2C, 0x1A, 0x2C, 0x1B, 0x2C, 0x1C, 0x2C, 0x1D, 0x2C, 0x1E, 0x2C, 0x1F, 0x2C, 0x20, 0x2C,
+ 0x21, 0x2C, 0x22, 0x2C, 0x23, 0x2C, 0x24, 0x2C, 0x25, 0x2C, 0x26, 0x2C, 0x27, 0x2C, 0x28, 0x2C,
+ 0x29, 0x2C, 0x2A, 0x2C, 0x2B, 0x2C, 0x2C, 0x2C, 0x2D, 0x2C, 0x2E, 0x2C, 0x5F, 0x2C, 0x60, 0x2C,
+ 0x60, 0x2C, 0x62, 0x2C, 0x63, 0x2C, 0x64, 0x2C, 0x65, 0x2C, 0x66, 0x2C, 0x67, 0x2C, 0x67, 0x2C,
+ 0x69, 0x2C, 0x69, 0x2C, 0x6B, 0x2C, 0x6B, 0x2C, 0x6D, 0x2C, 0x6E, 0x2C, 0x6F, 0x2C, 0x70, 0x2C,
+ 0x71, 0x2C, 0x72, 0x2C, 0x73, 0x2C, 0x74, 0x2C, 0x75, 0x2C, 0x75, 0x2C, 0x77, 0x2C, 0x78, 0x2C,
+ 0x79, 0x2C, 0x7A, 0x2C, 0x7B, 0x2C, 0x7C, 0x2C, 0x7D, 0x2C, 0x7E, 0x2C, 0x7F, 0x2C, 0x80, 0x2C,
+ 0x80, 0x2C, 0x82, 0x2C, 0x82, 0x2C, 0x84, 0x2C, 0x84, 0x2C, 0x86, 0x2C, 0x86, 0x2C, 0x88, 0x2C,
+ 0x88, 0x2C, 0x8A, 0x2C, 0x8A, 0x2C, 0x8C, 0x2C, 0x8C, 0x2C, 0x8E, 0x2C, 0x8E, 0x2C, 0x90, 0x2C,
+ 0x90, 0x2C, 0x92, 0x2C, 0x92, 0x2C, 0x94, 0x2C, 0x94, 0x2C, 0x96, 0x2C, 0x96, 0x2C, 0x98, 0x2C,
+ 0x98, 0x2C, 0x9A, 0x2C, 0x9A, 0x2C, 0x9C, 0x2C, 0x9C, 0x2C, 0x9E, 0x2C, 0x9E, 0x2C, 0xA0, 0x2C,
+ 0xA0, 0x2C, 0xA2, 0x2C, 0xA2, 0x2C, 0xA4, 0x2C, 0xA4, 0x2C, 0xA6, 0x2C, 0xA6, 0x2C, 0xA8, 0x2C,
+ 0xA8, 0x2C, 0xAA, 0x2C, 0xAA, 0x2C, 0xAC, 0x2C, 0xAC, 0x2C, 0xAE, 0x2C, 0xAE, 0x2C, 0xB0, 0x2C,
+ 0xB0, 0x2C, 0xB2, 0x2C, 0xB2, 0x2C, 0xB4, 0x2C, 0xB4, 0x2C, 0xB6, 0x2C, 0xB6, 0x2C, 0xB8, 0x2C,
+ 0xB8, 0x2C, 0xBA, 0x2C, 0xBA, 0x2C, 0xBC, 0x2C, 0xBC, 0x2C, 0xBE, 0x2C, 0xBE, 0x2C, 0xC0, 0x2C,
+ 0xC0, 0x2C, 0xC2, 0x2C, 0xC2, 0x2C, 0xC4, 0x2C, 0xC4, 0x2C, 0xC6, 0x2C, 0xC6, 0x2C, 0xC8, 0x2C,
+ 0xC8, 0x2C, 0xCA, 0x2C, 0xCA, 0x2C, 0xCC, 0x2C, 0xCC, 0x2C, 0xCE, 0x2C, 0xCE, 0x2C, 0xD0, 0x2C,
+ 0xD0, 0x2C, 0xD2, 0x2C, 0xD2, 0x2C, 0xD4, 0x2C, 0xD4, 0x2C, 0xD6, 0x2C, 0xD6, 0x2C, 0xD8, 0x2C,
+ 0xD8, 0x2C, 0xDA, 0x2C, 0xDA, 0x2C, 0xDC, 0x2C, 0xDC, 0x2C, 0xDE, 0x2C, 0xDE, 0x2C, 0xE0, 0x2C,
+ 0xE0, 0x2C, 0xE2, 0x2C, 0xE2, 0x2C, 0xE4, 0x2C, 0xE5, 0x2C, 0xE6, 0x2C, 0xE7, 0x2C, 0xE8, 0x2C,
+ 0xE9, 0x2C, 0xEA, 0x2C, 0xEB, 0x2C, 0xEC, 0x2C, 0xED, 0x2C, 0xEE, 0x2C, 0xEF, 0x2C, 0xF0, 0x2C,
+ 0xF1, 0x2C, 0xF2, 0x2C, 0xF3, 0x2C, 0xF4, 0x2C, 0xF5, 0x2C, 0xF6, 0x2C, 0xF7, 0x2C, 0xF8, 0x2C,
+ 0xF9, 0x2C, 0xFA, 0x2C, 0xFB, 0x2C, 0xFC, 0x2C, 0xFD, 0x2C, 0xFE, 0x2C, 0xFF, 0x2C, 0xA0, 0x10,
+ 0xA1, 0x10, 0xA2, 0x10, 0xA3, 0x10, 0xA4, 0x10, 0xA5, 0x10, 0xA6, 0x10, 0xA7, 0x10, 0xA8, 0x10,
+ 0xA9, 0x10, 0xAA, 0x10, 0xAB, 0x10, 0xAC, 0x10, 0xAD, 0x10, 0xAE, 0x10, 0xAF, 0x10, 0xB0, 0x10,
+ 0xB1, 0x10, 0xB2, 0x10, 0xB3, 0x10, 0xB4, 0x10, 0xB5, 0x10, 0xB6, 0x10, 0xB7, 0x10, 0xB8, 0x10,
+ 0xB9, 0x10, 0xBA, 0x10, 0xBB, 0x10, 0xBC, 0x10, 0xBD, 0x10, 0xBE, 0x10, 0xBF, 0x10, 0xC0, 0x10,
+ 0xC1, 0x10, 0xC2, 0x10, 0xC3, 0x10, 0xC4, 0x10, 0xC5, 0x10, 0xFF, 0xFF, 0x1B, 0xD2, 0x21, 0xFF,
+ 0x22, 0xFF, 0x23, 0xFF, 0x24, 0xFF, 0x25, 0xFF, 0x26, 0xFF, 0x27, 0xFF, 0x28, 0xFF, 0x29, 0xFF,
+ 0x2A, 0xFF, 0x2B, 0xFF, 0x2C, 0xFF, 0x2D, 0xFF, 0x2E, 0xFF, 0x2F, 0xFF, 0x30, 0xFF, 0x31, 0xFF,
+ 0x32, 0xFF, 0x33, 0xFF, 0x34, 0xFF, 0x35, 0xFF, 0x36, 0xFF, 0x37, 0xFF, 0x38, 0xFF, 0x39, 0xFF,
+ 0x3A, 0xFF, 0x5B, 0xFF, 0x5C, 0xFF, 0x5D, 0xFF, 0x5E, 0xFF, 0x5F, 0xFF, 0x60, 0xFF, 0x61, 0xFF,
+ 0x62, 0xFF, 0x63, 0xFF, 0x64, 0xFF, 0x65, 0xFF, 0x66, 0xFF, 0x67, 0xFF, 0x68, 0xFF, 0x69, 0xFF,
+ 0x6A, 0xFF, 0x6B, 0xFF, 0x6C, 0xFF, 0x6D, 0xFF, 0x6E, 0xFF, 0x6F, 0xFF, 0x70, 0xFF, 0x71, 0xFF,
+ 0x72, 0xFF, 0x73, 0xFF, 0x74, 0xFF, 0x75, 0xFF, 0x76, 0xFF, 0x77, 0xFF, 0x78, 0xFF, 0x79, 0xFF,
+ 0x7A, 0xFF, 0x7B, 0xFF, 0x7C, 0xFF, 0x7D, 0xFF, 0x7E, 0xFF, 0x7F, 0xFF, 0x80, 0xFF, 0x81, 0xFF,
+ 0x82, 0xFF, 0x83, 0xFF, 0x84, 0xFF, 0x85, 0xFF, 0x86, 0xFF, 0x87, 0xFF, 0x88, 0xFF, 0x89, 0xFF,
+ 0x8A, 0xFF, 0x8B, 0xFF, 0x8C, 0xFF, 0x8D, 0xFF, 0x8E, 0xFF, 0x8F, 0xFF, 0x90, 0xFF, 0x91, 0xFF,
+ 0x92, 0xFF, 0x93, 0xFF, 0x94, 0xFF, 0x95, 0xFF, 0x96, 0xFF, 0x97, 0xFF, 0x98, 0xFF, 0x99, 0xFF,
+ 0x9A, 0xFF, 0x9B, 0xFF, 0x9C, 0xFF, 0x9D, 0xFF, 0x9E, 0xFF, 0x9F, 0xFF, 0xA0, 0xFF, 0xA1, 0xFF,
+ 0xA2, 0xFF, 0xA3, 0xFF, 0xA4, 0xFF, 0xA5, 0xFF, 0xA6, 0xFF, 0xA7, 0xFF, 0xA8, 0xFF, 0xA9, 0xFF,
+ 0xAA, 0xFF, 0xAB, 0xFF, 0xAC, 0xFF, 0xAD, 0xFF, 0xAE, 0xFF, 0xAF, 0xFF, 0xB0, 0xFF, 0xB1, 0xFF,
+ 0xB2, 0xFF, 0xB3, 0xFF, 0xB4, 0xFF, 0xB5, 0xFF, 0xB6, 0xFF, 0xB7, 0xFF, 0xB8, 0xFF, 0xB9, 0xFF,
+ 0xBA, 0xFF, 0xBB, 0xFF, 0xBC, 0xFF, 0xBD, 0xFF, 0xBE, 0xFF, 0xBF, 0xFF, 0xC0, 0xFF, 0xC1, 0xFF,
+ 0xC2, 0xFF, 0xC3, 0xFF, 0xC4, 0xFF, 0xC5, 0xFF, 0xC6, 0xFF, 0xC7, 0xFF, 0xC8, 0xFF, 0xC9, 0xFF,
+ 0xCA, 0xFF, 0xCB, 0xFF, 0xCC, 0xFF, 0xCD, 0xFF, 0xCE, 0xFF, 0xCF, 0xFF, 0xD0, 0xFF, 0xD1, 0xFF,
+ 0xD2, 0xFF, 0xD3, 0xFF, 0xD4, 0xFF, 0xD5, 0xFF, 0xD6, 0xFF, 0xD7, 0xFF, 0xD8, 0xFF, 0xD9, 0xFF,
+ 0xDA, 0xFF, 0xDB, 0xFF, 0xDC, 0xFF, 0xDD, 0xFF, 0xDE, 0xFF, 0xDF, 0xFF, 0xE0, 0xFF, 0xE1, 0xFF,
+ 0xE2, 0xFF, 0xE3, 0xFF, 0xE4, 0xFF, 0xE5, 0xFF, 0xE6, 0xFF, 0xE7, 0xFF, 0xE8, 0xFF, 0xE9, 0xFF,
+ 0xEA, 0xFF, 0xEB, 0xFF, 0xEC, 0xFF, 0xED, 0xFF, 0xEE, 0xFF, 0xEF, 0xFF, 0xF0, 0xFF, 0xF1, 0xFF,
+ 0xF2, 0xFF, 0xF3, 0xFF, 0xF4, 0xFF, 0xF5, 0xFF, 0xF6, 0xFF, 0xF7, 0xFF, 0xF8, 0xFF, 0xF9, 0xFF,
+ 0xFA, 0xFF, 0xFB, 0xFF, 0xFC, 0xFF, 0xFD, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF
+};
+
+#endif /* _UPCASE_H */
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : version.h */
+/* PURPOSE : sdFAT File Manager */
+/* */
+/************************************************************************/
+
+/* @fs.sec -- 8304f2146e71f0f27864eebd58892245 -- */
+#define SDFAT_VERSION "2.4.5"
--- /dev/null
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/************************************************************************/
+/* */
+/* PROJECT : exFAT & FAT12/16/32 File System */
+/* FILE : xattr.c */
+/* PURPOSE : sdFAT code for supporting xattr(Extended File Attributes) */
+/* */
+/*----------------------------------------------------------------------*/
+/* NOTES */
+/* */
+/* */
+/************************************************************************/
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/dcache.h>
+#include "sdfat.h"
+
+/* Fallback SELinux label used when the kernel config does not provide
+ * CONFIG_SDFAT_VIRTUAL_XATTR_SELINUX_LABEL; this exact string is reported
+ * as the "security.selinux" value of every file on the volume.
+ */
+#ifndef CONFIG_SDFAT_VIRTUAL_XATTR_SELINUX_LABEL
+#define CONFIG_SDFAT_VIRTUAL_XATTR_SELINUX_LABEL ("undefined")
+#endif
+
+/* The single virtual xattr value sdFAT serves (FAT has no on-disk xattrs). */
+static const char default_xattr[] = CONFIG_SDFAT_VIRTUAL_XATTR_SELINUX_LABEL;
+
+/* Tell whether @name is the one virtual xattr sdFAT emulates.
+ * Returns 0 for "security.selinux"; -1 for NULL or any other name.
+ */
+static int can_support(const char *name)
+{
+	if (name && strcmp(name, "security.selinux") == 0)
+		return 0;
+
+	return -1;
+}
+
+/* listxattr(2) handler: sdFAT keeps no real extended attributes on disk,
+ * so the attribute-name list is always empty (returns 0; @list untouched).
+ */
+ssize_t sdfat_listxattr(struct dentry *dentry, char *list, size_t size)
+{
+ return 0;
+}
+
+
+/*************************************************************************
+ * INNER FUNCTIONS WHICH HAVE KERNEL VERSION DEPENDENCY
+ *************************************************************************/
+/* Shared guard for the setxattr/removexattr paths: any attribute other
+ * than the emulated "security.selinux" is rejected with -EOPNOTSUPP.
+ */
+static int __sdfat_xattr_check_support(const char *name)
+{
+	return can_support(name) ? -EOPNOTSUPP : 0;
+}
+
+/* Shared getxattr body: report the build-time SELinux label for every file.
+ *
+ * Returns the label length (excluding the trailing NUL), or -EOPNOTSUPP
+ * for any attribute other than "security.selinux".  The label (with its
+ * NUL terminator) is copied only when @value is non-NULL and @size can
+ * hold it; calling with @size == 0 is the conventional length query.
+ */
+ssize_t __sdfat_getxattr(const char *name, void *value, size_t size)
+{
+	size_t len = strlen(default_xattr);
+
+	if (can_support(name))
+		return -EOPNOTSUPP;
+
+	/* Was "size > len + 1": off-by-one that skipped the copy for an
+	 * exactly-sized buffer (len bytes + NUL) even though strcpy() fits.
+	 */
+	if (value && (size >= len + 1))
+		strcpy(value, default_xattr);
+
+	return len;
+}
+
+
+/*************************************************************************
+ * FUNCTIONS WHICH HAVE KERNEL VERSION DEPENDENCY
+ *************************************************************************/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+/* ->get() callback of the xattr_handler.  Android kernels 5.4+ add a
+ * trailing "flags" argument to the prototype; both variants ignore the
+ * dentry/inode/handler and forward to the version-independent
+ * __sdfat_getxattr().
+ */
+#if defined(CONFIG_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
+static int sdfat_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size,
+ int flags)
+{
+ return __sdfat_getxattr(name, buffer, size);
+}
+#else
+static int sdfat_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ return __sdfat_getxattr(name, buffer, size);
+}
+#endif
+
+/* ->set() callback of the xattr_handler: nothing is ever stored; only
+ * validates the attribute name (0 for "security.selinux", else
+ * -EOPNOTSUPP), so SELinux relabeling appears to succeed as a no-op.
+ */
+static int sdfat_xattr_set(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size,
+ int flags)
+{
+ return __sdfat_xattr_check_support(name);
+}
+
+/* Single catch-all handler: the empty prefix matches every xattr name,
+ * so all get/set requests funnel through the two callbacks above.
+ */
+const struct xattr_handler sdfat_xattr_handler = {
+ .prefix = "", /* match anything */
+ .get = sdfat_xattr_get,
+ .set = sdfat_xattr_set,
+};
+
+/* NULL-terminated handler table installed into sb->s_xattr. */
+const struct xattr_handler *sdfat_xattr_handlers[] = {
+ &sdfat_xattr_handler,
+ NULL
+};
+
+/* Attach the sdFAT xattr handler table to the superblock at mount time
+ * (>= 4.9 kernels dispatch xattr syscalls through sb->s_xattr).
+ */
+void setup_sdfat_xattr_handler(struct super_block *sb)
+{
+ sb->s_xattr = sdfat_xattr_handlers;
+}
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) */
+/* Pre-4.9 inode-operation entry point: accept only "security.selinux"
+ * (as a no-op), -EOPNOTSUPP otherwise.
+ */
+int sdfat_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
+{
+ return __sdfat_xattr_check_support(name);
+}
+
+/* Pre-4.9 inode-operation entry point: serve the fixed virtual label
+ * via the shared __sdfat_getxattr() helper.
+ */
+ssize_t sdfat_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
+{
+ return __sdfat_getxattr(name, value, size);
+}
+
+/* Pre-4.9 inode-operation entry point: removal is a no-op for the
+ * supported name, -EOPNOTSUPP for everything else.
+ */
+int sdfat_removexattr(struct dentry *dentry, const char *name)
+{
+ return __sdfat_xattr_check_support(name);
+}
+
+/* Pre-4.9 kernels dispatch xattrs through inode_operations rather than
+ * sb->s_xattr, so there is nothing to install here.
+ */
+void setup_sdfat_xattr_handler(struct super_block *sb)
+{
+ /* DO NOTHING */
+}
+#endif