/*
 * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/************************************************************************/
/*                                                                      */
/*  @PROJECT : exFAT & FAT12/16/32 File System                          */
/*  @PURPOSE : Defragmentation support for SDFAT32                      */
/*                                                                      */
/************************************************************************/
30 #include <linux/version.h>
31 #include <linux/list.h>
32 #include <linux/blkdev.h>
36 #include "amap_smart.h"
38 #ifdef CONFIG_SDFAT_DFR
41 * @brief get HW params for defrag daemon
42 * @return 0 on success, -errno otherwise
43 * @param sb super block
44 * @param arg defrag info arguments
45 * @remark protected by super_block
49 IN
struct super_block
*sb
,
50 OUT
struct defrag_info_arg
*arg
)
52 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
53 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
58 arg
->sec_sz
= sb
->s_blocksize
;
59 arg
->clus_sz
= fsi
->cluster_size
;
60 arg
->total_sec
= fsi
->num_sectors
;
61 arg
->fat_offset_sec
= fsi
->FAT1_start_sector
;
62 arg
->fat_sz_sec
= fsi
->num_FAT_sectors
;
63 arg
->n_fat
= (fsi
->FAT1_start_sector
== fsi
->FAT2_start_sector
) ? 1 : 2;
65 arg
->sec_per_au
= amap
->option
.au_size
;
66 arg
->hidden_sectors
= amap
->option
.au_align_factor
% amap
->option
.au_size
;
74 IN
struct super_block
*sb
,
75 IN DOS_DENTRY_T
*dos_ep
,
77 OUT
struct defrag_trav_arg
*arg
)
79 FS_INFO_T
*fsi
= NULL
;
81 unsigned int type
= 0, start_clus
= 0;
85 ERR_HANDLE2((!sb
|| !dos_ep
|| !i_pos
|| !arg
), err
, -EINVAL
);
86 fsi
= &(SDFAT_SB(sb
)->fsi
);
88 /* Get given entry's type */
89 type
= fsi
->fs_func
->get_entry_type((DENTRY_T
*) dos_ep
);
92 if (!strncmp(dos_ep
->name
, DOS_CUR_DIR_NAME
, DOS_NAME_LENGTH
)) {
94 } else if (!strncmp(dos_ep
->name
, DOS_PAR_DIR_NAME
, DOS_NAME_LENGTH
)) {
96 } else if ((type
== TYPE_DIR
) || (type
== TYPE_FILE
)) {
99 SET32_HI(start_clus
, le16_to_cpu(dos_ep
->start_clu_hi
));
100 SET32_LO(start_clus
, le16_to_cpu(dos_ep
->start_clu_lo
));
101 arg
->start_clus
= start_clus
;
103 /* Set type & i_pos */
104 if (type
== TYPE_DIR
)
105 arg
->type
= DFR_TRAV_TYPE_DIR
;
107 arg
->type
= DFR_TRAV_TYPE_FILE
;
112 memset(&uniname
, 0, sizeof(UNI_NAME_T
));
113 get_uniname_from_dos_entry(sb
, dos_ep
, &uniname
, 0x1);
115 * we should think that whether the size of arg->name
118 nls_uni16s_to_vfsname(sb
, &uniname
,
119 arg
->name
, sizeof(arg
->name
));
123 } else if (type
== TYPE_UNUSED
) {
135 * @fn defrag_scan_dir
136 * @brief scan given directory
137 * @return 0 on success, -errno otherwise
138 * @param sb super block
139 * @param args traverse args
140 * @remark protected by inode_lock, super_block and volume lock
144 IN
struct super_block
*sb
,
145 INOUT
struct defrag_trav_arg
*args
)
147 struct sdfat_sb_info
*sbi
= NULL
;
148 FS_INFO_T
*fsi
= NULL
;
149 struct defrag_trav_header
*header
= NULL
;
150 DOS_DENTRY_T
*dos_ep
;
152 int dot_found
= 0, args_idx
= DFR_TRAV_HEADER_IDX
+ 1, clus
= 0, index
= 0;
156 ERR_HANDLE2((!sb
|| !args
), err
, -EINVAL
);
159 header
= (struct defrag_trav_header
*) args
;
161 /* Exceptional case for ROOT */
162 if (header
->i_pos
== DFR_TRAV_ROOT_IPOS
) {
163 header
->start_clus
= fsi
->root_dir
;
164 dfr_debug("IOC_DFR_TRAV for ROOT: start_clus %08x", header
->start_clus
);
168 chain
.dir
= header
->start_clus
;
172 /* Check if this is directory */
174 FAT32_CHECK_CLUSTER(fsi
, chain
.dir
, err
);
176 dos_ep
= (DOS_DENTRY_T
*) get_dentry_in_dir(sb
, &chain
, 0, NULL
);
177 ERR_HANDLE2(!dos_ep
, err
, -EIO
);
179 if (strncmp(dos_ep
->name
, DOS_CUR_DIR_NAME
, DOS_NAME_LENGTH
)) {
181 dfr_err("Scan: Not a directory, err %d", err
);
186 /* For more-scan case */
187 if ((header
->stat
== DFR_TRAV_STAT_MORE
) &&
188 (header
->start_clus
== sbi
->dfr_hint_clus
) &&
189 (sbi
->dfr_hint_idx
> 0)) {
191 index
= sbi
->dfr_hint_idx
;
192 for (j
= 0; j
< (sbi
->dfr_hint_idx
/ fsi
->dentries_per_clu
); j
++) {
193 /* Follow FAT-chain */
194 FAT32_CHECK_CLUSTER(fsi
, chain
.dir
, err
);
196 err
= fat_ent_get(sb
, chain
.dir
, &(chain
.dir
));
199 if (!IS_CLUS_EOF(chain
.dir
)) {
201 index
-= fsi
->dentries_per_clu
;
204 * This directory modified. Stop scanning.
207 dfr_err("Scan: SCAN_MORE failed, err %d", err
);
212 /* For first-scan case */
219 /* Scan given directory and get info of children */
220 for ( ; index
< fsi
->dentries_per_clu
; index
++) {
221 DOS_DENTRY_T
*dos_ep
= NULL
;
225 FAT32_CHECK_CLUSTER(fsi
, chain
.dir
, err
);
227 dos_ep
= (DOS_DENTRY_T
*) get_dentry_in_dir(sb
, &chain
, index
, NULL
);
228 ERR_HANDLE2(!dos_ep
, err
, -EIO
);
230 /* Make i_pos for this entry */
231 SET64_HI(i_pos
, header
->start_clus
);
232 SET64_LO(i_pos
, clus
* fsi
->dentries_per_clu
+ index
);
234 err
= __defrag_scan_dir(sb
, dos_ep
, i_pos
, &args
[args_idx
]);
237 if (++args_idx
>= (PAGE_SIZE
/ sizeof(struct defrag_trav_arg
))) {
238 sbi
->dfr_hint_clus
= header
->start_clus
;
239 sbi
->dfr_hint_idx
= clus
* fsi
->dentries_per_clu
+ index
+ 1;
241 header
->stat
= DFR_TRAV_STAT_MORE
;
242 header
->nr_entries
= args_idx
;
246 } else if (err
== -EINVAL
) {
247 sbi
->dfr_hint_clus
= sbi
->dfr_hint_idx
= 0;
248 dfr_err("Scan: err %d", err
);
251 } else if (err
== -ENOENT
) {
252 sbi
->dfr_hint_clus
= sbi
->dfr_hint_idx
= 0;
261 /* Follow FAT-chain */
262 FAT32_CHECK_CLUSTER(fsi
, chain
.dir
, err
);
264 err
= fat_ent_get(sb
, chain
.dir
, &(chain
.dir
));
267 if (!IS_CLUS_EOF(chain
.dir
)) {
275 header
->stat
= DFR_TRAV_STAT_DONE
;
276 header
->nr_entries
= args_idx
;
284 __defrag_validate_cluster_prev(
285 IN
struct super_block
*sb
,
286 IN
struct defrag_chunk_info
*chunk
)
288 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
291 unsigned int entry
= 0, clus
= 0;
294 if (chunk
->prev_clus
== 0) {
295 /* For the first cluster of a file */
296 dir
.dir
= GET64_HI(chunk
->i_pos
);
297 dir
.flags
= 0x1; // Assume non-continuous
299 entry
= GET64_LO(chunk
->i_pos
);
301 FAT32_CHECK_CLUSTER(fsi
, dir
.dir
, err
);
303 ep
= get_dentry_in_dir(sb
, &dir
, entry
, NULL
);
309 /* should call fat_get_entry_clu0(ep) */
310 clus
= fsi
->fs_func
->get_entry_clu0(ep
);
311 if (clus
!= chunk
->d_clus
) {
317 FAT32_CHECK_CLUSTER(fsi
, chunk
->prev_clus
, err
);
319 err
= fat_ent_get(sb
, chunk
->prev_clus
, &clus
);
322 if (chunk
->d_clus
!= clus
)
332 __defrag_validate_cluster_next(
333 IN
struct super_block
*sb
,
334 IN
struct defrag_chunk_info
*chunk
)
336 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
337 unsigned int clus
= 0;
340 /* Check next_clus */
341 FAT32_CHECK_CLUSTER(fsi
, (chunk
->d_clus
+ chunk
->nr_clus
- 1), err
);
343 err
= fat_ent_get(sb
, (chunk
->d_clus
+ chunk
->nr_clus
- 1), &clus
);
346 if (chunk
->next_clus
!= (clus
& FAT32_EOF
))
355 * @fn __defrag_check_au
356 * @brief check if this AU is in use
357 * @return 0 if idle, 1 if busy
358 * @param sb super block
359 * @param clus physical cluster num
360 * @param limit # of used clusters from daemon
364 struct super_block
*sb
,
368 unsigned int nr_free
= amap_get_freeclus(sb
, clus
);
370 #if defined(CONFIG_SDFAT_DFR_DEBUG) && defined(CONFIG_SDFAT_DBG_MSG)
371 if (nr_free
< limit
) {
372 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
373 AU_INFO_T
*au
= GET_AU(amap
, i_AU_of_CLU(amap
, clus
));
375 dfr_debug("AU[%d] nr_free %d, limit %d", au
->idx
, nr_free
, limit
);
378 return ((nr_free
< limit
) ? 1 : 0);
383 * @fn defrag_validate_cluster
384 * @brief validate cluster info of given chunk
385 * @return 0 on success, -errno otherwise
386 * @param inode inode of given chunk
387 * @param chunk given chunk
388 * @param skip_prev flag to skip checking previous cluster info
389 * @remark protected by super_block and volume lock
392 defrag_validate_cluster(
393 IN
struct inode
*inode
,
394 IN
struct defrag_chunk_info
*chunk
,
397 struct super_block
*sb
= inode
->i_sb
;
398 FILE_ID_T
*fid
= &(SDFAT_I(inode
)->fid
);
399 unsigned int clus
= 0;
402 /* If this inode is unlink-ed, skip it */
403 if (fid
->dir
.dir
== DIR_DELETED
)
406 /* Skip working-AU */
407 err
= amap_check_working(sb
, chunk
->d_clus
);
411 /* Check # of free_clus of belonged AU */
412 err
= __defrag_check_au(inode
->i_sb
, chunk
->d_clus
, CLUS_PER_AU(sb
) - chunk
->au_clus
);
416 /* Check chunk's clusters */
417 for (i
= 0; i
< chunk
->nr_clus
; i
++) {
418 err
= fsapi_map_clus(inode
, chunk
->f_clus
+ i
, &clus
, ALLOC_NOWHERE
);
419 if (err
|| (chunk
->d_clus
+ i
!= clus
)) {
426 /* Check next_clus */
427 err
= __defrag_validate_cluster_next(sb
, chunk
);
431 /* Check prev_clus */
432 err
= __defrag_validate_cluster_prev(sb
, chunk
);
442 * @fn defrag_reserve_clusters
443 * @brief reserve clusters for defrag
444 * @return 0 on success, -errno otherwise
445 * @param sb super block
446 * @param nr_clus # of clusters to reserve
447 * @remark protected by super_block and volume lock
450 defrag_reserve_clusters(
451 INOUT
struct super_block
*sb
,
454 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
455 FS_INFO_T
*fsi
= &(sbi
->fsi
);
457 if (!(sbi
->options
.improved_allocation
& SDFAT_ALLOC_DELAY
))
461 /* Update used_clusters */
462 if (fsi
->used_clusters
== (u32
) ~0) {
463 if (fsi
->fs_func
->count_used_clusters(sb
, &fsi
->used_clusters
))
467 /* Check error case */
468 if (fsi
->used_clusters
+ fsi
->reserved_clusters
+ nr_clus
>= fsi
->num_clusters
- 2) {
470 } else if (fsi
->reserved_clusters
+ nr_clus
< 0) {
471 dfr_err("Reserve count: reserved_clusters %d, nr_clus %d",
472 fsi
->reserved_clusters
, nr_clus
);
473 BUG_ON(fsi
->reserved_clusters
+ nr_clus
< 0);
476 sbi
->dfr_reserved_clus
+= nr_clus
;
477 fsi
->reserved_clusters
+= nr_clus
;
484 * @fn defrag_mark_ignore
485 * @brief mark corresponding AU to be ignored
486 * @return 0 on success, -errno otherwise
487 * @param sb super block
488 * @param clus given cluster num
489 * @remark protected by super_block
493 INOUT
struct super_block
*sb
,
494 IN
unsigned int clus
)
498 if (SDFAT_SB(sb
)->options
.improved_allocation
& SDFAT_ALLOC_SMART
)
499 err
= amap_mark_ignore(sb
, clus
);
502 dfr_debug("err %d", err
);
508 * @fn defrag_unmark_ignore_all
509 * @brief unmark all ignored AUs
511 * @param sb super block
512 * @remark protected by super_block
515 defrag_unmark_ignore_all(struct super_block
*sb
)
517 if (SDFAT_SB(sb
)->options
.improved_allocation
& SDFAT_ALLOC_SMART
)
518 amap_unmark_ignore_all(sb
);
523 * @fn defrag_map_cluster
524 * @brief get_block function for defrag dests
525 * @return 0 on success, -errno otherwise
527 * @param clu_offset logical cluster offset
528 * @param clu mapped cluster (physical)
529 * @remark protected by super_block and volume lock
534 unsigned int clu_offset
,
537 struct super_block
*sb
= inode
->i_sb
;
538 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
539 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
540 #ifdef CONFIG_SDFAT_DFR_PACKING
541 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
543 FILE_ID_T
*fid
= &(SDFAT_I(inode
)->fid
);
544 struct defrag_info
*ino_dfr
= &(SDFAT_I(inode
)->dfr_info
);
545 struct defrag_chunk_info
*chunk
= NULL
;
547 int num
= 0, i
= 0, nr_new
= 0, err
= 0;
549 /* Get corresponding chunk */
550 for (i
= 0; i
< ino_dfr
->nr_chunks
; i
++) {
551 chunk
= &(ino_dfr
->chunks
[i
]);
553 if ((chunk
->f_clus
<= clu_offset
) && (clu_offset
< chunk
->f_clus
+ chunk
->nr_clus
)) {
554 /* For already allocated new_clus */
555 if (sbi
->dfr_new_clus
[chunk
->new_idx
+ clu_offset
- chunk
->f_clus
]) {
556 *clu
= sbi
->dfr_new_clus
[chunk
->new_idx
+ clu_offset
- chunk
->f_clus
];
564 fscore_set_vol_flags(sb
, VOL_DIRTY
, 0);
566 new_clu
.dir
= CLUS_EOF
;
568 new_clu
.flags
= fid
->flags
;
570 /* Allocate new cluster */
571 #ifdef CONFIG_SDFAT_DFR_PACKING
572 if (amap
->n_clean_au
* DFR_FULL_RATIO
<= amap
->n_au
* DFR_DEFAULT_PACKING_RATIO
)
573 num
= fsi
->fs_func
->alloc_cluster(sb
, 1, &new_clu
, ALLOC_COLD_PACKING
);
575 num
= fsi
->fs_func
->alloc_cluster(sb
, 1, &new_clu
, ALLOC_COLD_ALIGNED
);
577 num
= fsi
->fs_func
->alloc_cluster(sb
, 1, &new_clu
, ALLOC_COLD_ALIGNED
);
581 dfr_err("Map: num %d", num
);
585 /* Decrease reserved cluster count */
586 defrag_reserve_clusters(sb
, -1);
588 /* Add new_clus info in ino_dfr */
589 sbi
->dfr_new_clus
[chunk
->new_idx
+ clu_offset
- chunk
->f_clus
] = new_clu
.dir
;
591 /* Make FAT-chain for new_clus */
592 for (i
= 0; i
< chunk
->nr_clus
; i
++) {
594 if (sbi
->dfr_new_clus
[chunk
->new_idx
+ i
])
599 if (!sbi
->dfr_new_clus
[chunk
->new_idx
+ i
])
604 if (nr_new
== chunk
->nr_clus
) {
605 for (i
= 0; i
< chunk
->nr_clus
- 1; i
++) {
606 FAT32_CHECK_CLUSTER(fsi
, sbi
->dfr_new_clus
[chunk
->new_idx
+ i
], err
);
609 sbi
->dfr_new_clus
[chunk
->new_idx
+ i
],
610 sbi
->dfr_new_clus
[chunk
->new_idx
+ i
+ 1]))
621 * @fn defrag_writepage_end_io
622 * @brief check WB status of requested page
627 defrag_writepage_end_io(
628 INOUT
struct page
*page
)
630 struct super_block
*sb
= page
->mapping
->host
->i_sb
;
631 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
632 struct defrag_info
*ino_dfr
= &(SDFAT_I(page
->mapping
->host
)->dfr_info
);
633 unsigned int clus_start
= 0, clus_end
= 0;
636 /* Check if this inode is on defrag */
637 if (atomic_read(&ino_dfr
->stat
) != DFR_INO_STAT_REQ
)
640 clus_start
= page
->index
/ PAGES_PER_CLUS(sb
);
641 clus_end
= clus_start
+ 1;
643 /* Check each chunk in given inode */
644 for (i
= 0; i
< ino_dfr
->nr_chunks
; i
++) {
645 struct defrag_chunk_info
*chunk
= &(ino_dfr
->chunks
[i
]);
646 unsigned int chunk_start
= 0, chunk_end
= 0;
648 chunk_start
= chunk
->f_clus
;
649 chunk_end
= chunk
->f_clus
+ chunk
->nr_clus
;
651 if ((clus_start
>= chunk_start
) && (clus_end
<= chunk_end
)) {
652 int off
= clus_start
- chunk_start
;
654 clear_bit((page
->index
& (PAGES_PER_CLUS(sb
) - 1)),
655 (volatile unsigned long *)&(sbi
->dfr_page_wb
[chunk
->new_idx
+ off
]));
662 * @fn __defrag_check_wb
663 * @brief check if WB for given chunk completed
664 * @return 0 on success, -errno otherwise
665 * @param sbi super block info
666 * @param chunk given chunk
670 IN
struct sdfat_sb_info
*sbi
,
671 IN
struct defrag_chunk_info
*chunk
)
673 int err
= 0, wb_i
= 0, i
= 0, nr_new
= 0;
678 /* Check WB complete status first */
679 for (wb_i
= 0; wb_i
< chunk
->nr_clus
; wb_i
++) {
680 if (atomic_read((atomic_t
*)&(sbi
->dfr_page_wb
[chunk
->new_idx
+ wb_i
]))) {
687 * Check NEW_CLUS status.
688 * writepage_end_io cannot check whole WB complete status,
689 * so we need to check NEW_CLUS status.
691 for (i
= 0; i
< chunk
->nr_clus
; i
++)
692 if (sbi
->dfr_new_clus
[chunk
->new_idx
+ i
])
695 if (nr_new
== chunk
->nr_clus
) {
697 if ((wb_i
!= chunk
->nr_clus
) && (wb_i
!= chunk
->nr_clus
- 1))
698 dfr_debug("submit_fullpage_bio() called on a page (nr_clus %d, wb_i %d)",
699 chunk
->nr_clus
, wb_i
);
701 BUG_ON(nr_new
> chunk
->nr_clus
);
703 dfr_debug("nr_new %d, nr_clus %d", nr_new
, chunk
->nr_clus
);
707 /* Update chunk's state */
709 chunk
->stat
|= DFR_CHUNK_STAT_WB
;
716 __defrag_check_fat_old(
717 IN
struct super_block
*sb
,
718 IN
struct inode
*inode
,
719 IN
struct defrag_chunk_info
*chunk
)
721 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
722 unsigned int clus
= 0;
723 int err
= 0, idx
= 0, max_idx
= 0;
726 clus
= SDFAT_I(inode
)->fid
.start_clu
;
728 /* Follow FAT-chain */
729 #define num_clusters(val) ((val) ? (s32)((val - 1) >> fsi->cluster_size_bits) + 1 : 0)
730 max_idx
= num_clusters(SDFAT_I(inode
)->i_size_ondisk
);
731 for (idx
= 0; idx
< max_idx
; idx
++) {
733 FAT32_CHECK_CLUSTER(fsi
, clus
, err
);
735 err
= fat_ent_get(sb
, clus
, &clus
);
738 if ((idx
< max_idx
- 1) && (IS_CLUS_EOF(clus
) || IS_CLUS_FREE(clus
))) {
739 dfr_err("FAT: inode %p, max_idx %d, idx %d, clus %08x, "
740 "f_clus %d, nr_clus %d", inode
, max_idx
,
741 idx
, clus
, chunk
->f_clus
, chunk
->nr_clus
);
742 BUG_ON(idx
< max_idx
- 1);
753 __defrag_check_fat_new(
754 IN
struct super_block
*sb
,
755 IN
struct inode
*inode
,
756 IN
struct defrag_chunk_info
*chunk
)
758 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
759 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
760 unsigned int clus
= 0;
763 /* Check start of FAT-chain */
764 if (chunk
->prev_clus
) {
765 FAT32_CHECK_CLUSTER(fsi
, chunk
->prev_clus
, err
);
767 err
= fat_ent_get(sb
, chunk
->prev_clus
, &clus
);
770 clus
= SDFAT_I(inode
)->fid
.start_clu
;
772 if (sbi
->dfr_new_clus
[chunk
->new_idx
] != clus
) {
773 dfr_err("FAT: inode %p, start_clus %08x, read_clus %08x",
774 inode
, sbi
->dfr_new_clus
[chunk
->new_idx
], clus
);
779 /* Check inside of FAT-chain */
780 if (chunk
->nr_clus
> 1) {
781 for (i
= 0; i
< chunk
->nr_clus
- 1; i
++) {
782 FAT32_CHECK_CLUSTER(fsi
, sbi
->dfr_new_clus
[chunk
->new_idx
+ i
], err
);
784 err
= fat_ent_get(sb
, sbi
->dfr_new_clus
[chunk
->new_idx
+ i
], &clus
);
786 if (sbi
->dfr_new_clus
[chunk
->new_idx
+ i
+ 1] != clus
) {
787 dfr_err("FAT: inode %p, new_clus %08x, read_clus %08x",
788 inode
, sbi
->dfr_new_clus
[chunk
->new_idx
], clus
);
796 /* Check end of FAT-chain */
797 FAT32_CHECK_CLUSTER(fsi
, sbi
->dfr_new_clus
[chunk
->new_idx
+ chunk
->nr_clus
- 1], err
);
799 err
= fat_ent_get(sb
, sbi
->dfr_new_clus
[chunk
->new_idx
+ chunk
->nr_clus
- 1], &clus
);
801 if ((chunk
->next_clus
& 0x0FFFFFFF) != (clus
& 0x0FFFFFFF)) {
802 dfr_err("FAT: inode %p, next_clus %08x, read_clus %08x", inode
, chunk
->next_clus
, clus
);
812 * @fn __defrag_update_dirent
813 * @brief update DIR entry for defrag req
815 * @param sb super block
816 * @param chunk given chunk
819 __defrag_update_dirent(
820 struct super_block
*sb
,
821 struct defrag_chunk_info
*chunk
)
823 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
824 FS_INFO_T
*fsi
= &SDFAT_SB(sb
)->fsi
;
826 DOS_DENTRY_T
*dos_ep
;
827 unsigned int entry
= 0, sector
= 0;
828 unsigned short hi
= 0, lo
= 0;
831 dir
.dir
= GET64_HI(chunk
->i_pos
);
832 dir
.flags
= 0x1; // Assume non-continuous
834 entry
= GET64_LO(chunk
->i_pos
);
836 FAT32_CHECK_CLUSTER(fsi
, dir
.dir
, err
);
838 dos_ep
= (DOS_DENTRY_T
*) get_dentry_in_dir(sb
, &dir
, entry
, §or
);
840 hi
= GET32_HI(sbi
->dfr_new_clus
[chunk
->new_idx
]);
841 lo
= GET32_LO(sbi
->dfr_new_clus
[chunk
->new_idx
]);
843 dos_ep
->start_clu_hi
= cpu_to_le16(hi
);
844 dos_ep
->start_clu_lo
= cpu_to_le16(lo
);
846 dcache_modify(sb
, sector
);
851 * @fn defrag_update_fat_prev
852 * @brief update FAT chain for defrag requests
854 * @param sb super block
855 * @param force flag to force FAT update
856 * @remark protected by super_block and volume lock
859 defrag_update_fat_prev(
860 struct super_block
*sb
,
863 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
864 FS_INFO_T
*fsi
= &(sbi
->fsi
);
865 struct defrag_info
*sb_dfr
= &sbi
->dfr_info
, *ino_dfr
= NULL
;
866 int skip
= 0, done
= 0;
868 /* Check if FS_ERROR occurred */
869 if (sb
->s_flags
& MS_RDONLY
) {
870 dfr_err("RDONLY partition (err %d)", -EPERM
);
874 list_for_each_entry(ino_dfr
, &sb_dfr
->entry
, entry
) {
875 struct inode
*inode
= &(container_of(ino_dfr
, struct sdfat_inode_info
, dfr_info
)->vfs_inode
);
876 struct sdfat_inode_info
*ino_info
= SDFAT_I(inode
);
877 struct defrag_chunk_info
*chunk_prev
= NULL
;
880 mutex_lock(&ino_dfr
->lock
);
881 BUG_ON(atomic_read(&ino_dfr
->stat
) != DFR_INO_STAT_REQ
);
882 for (i
= 0; i
< ino_dfr
->nr_chunks
; i
++) {
883 struct defrag_chunk_info
*chunk
= NULL
;
886 chunk
= &(ino_dfr
->chunks
[i
]);
889 /* Do nothing for already passed chunk */
890 if (chunk
->stat
== DFR_CHUNK_STAT_PASS
) {
895 /* Handle error case */
896 if (chunk
->stat
== DFR_CHUNK_STAT_ERR
) {
901 /* Double-check clusters */
903 (chunk
->f_clus
== chunk_prev
->f_clus
+ chunk_prev
->nr_clus
) &&
904 (chunk_prev
->stat
== DFR_CHUNK_STAT_PASS
)) {
906 err
= defrag_validate_cluster(inode
, chunk
, 1);
908 /* Handle continuous chunks in a file */
911 sbi
->dfr_new_clus
[chunk_prev
->new_idx
+ chunk_prev
->nr_clus
- 1];
912 dfr_debug("prev->f_clus %d, prev->nr_clus %d, chunk->f_clus %d",
913 chunk_prev
->f_clus
, chunk_prev
->nr_clus
, chunk
->f_clus
);
916 err
= defrag_validate_cluster(inode
, chunk
, 0);
920 dfr_err("Cluster validation: inode %p, chunk->f_clus %d, err %d",
921 inode
, chunk
->f_clus
, err
);
926 * Skip update_fat_prev if WB or update_fat_next not completed.
927 * Go to error case if FORCE set.
929 if (__defrag_check_wb(sbi
, chunk
) || (chunk
->stat
!= DFR_CHUNK_STAT_PREP
)) {
932 dfr_err("Skip case: inode %p, stat %x, f_clus %d, err %d",
933 inode
, chunk
->stat
, chunk
->f_clus
, err
);
940 #ifdef CONFIG_SDFAT_DFR_DEBUG
942 defrag_spo_test(sb
, DFR_SPO_RANDOM
, __func__
);
945 /* Update chunk's previous cluster */
946 if (chunk
->prev_clus
== 0) {
947 /* For the first cluster of a file */
948 /* Update ino_info->fid.start_clu */
949 ino_info
->fid
.start_clu
= sbi
->dfr_new_clus
[chunk
->new_idx
];
950 __defrag_update_dirent(sb
, chunk
);
952 FAT32_CHECK_CLUSTER(fsi
, chunk
->prev_clus
, err
);
956 sbi
->dfr_new_clus
[chunk
->new_idx
])) {
962 /* Clear extent cache */
963 extent_cache_inval_inode(inode
);
965 /* Update FID info */
966 ino_info
->fid
.hint_bmap
.off
= -1;
967 ino_info
->fid
.hint_bmap
.clu
= 0;
969 /* Clear old FAT-chain */
970 for (j
= 0; j
< chunk
->nr_clus
; j
++)
971 defrag_free_cluster(sb
, chunk
->d_clus
+ j
);
973 /* Mark this chunk PASS */
974 chunk
->stat
= DFR_CHUNK_STAT_PASS
;
975 __defrag_check_fat_new(sb
, inode
, chunk
);
982 * chunk->new_idx != 0 means this chunk needs to be cleaned up
984 if (chunk
->new_idx
) {
985 /* Free already allocated clusters */
986 for (j
= 0; j
< chunk
->nr_clus
; j
++) {
987 if (sbi
->dfr_new_clus
[chunk
->new_idx
+ j
]) {
988 defrag_free_cluster(sb
, sbi
->dfr_new_clus
[chunk
->new_idx
+ j
]);
989 sbi
->dfr_new_clus
[chunk
->new_idx
+ j
] = 0;
993 __defrag_check_fat_old(sb
, inode
, chunk
);
997 * chunk->new_idx == 0 means this chunk already cleaned up
1000 chunk
->stat
= DFR_CHUNK_STAT_ERR
;
1005 BUG_ON(!mutex_is_locked(&ino_dfr
->lock
));
1006 mutex_unlock(&ino_dfr
->lock
);
1011 dfr_debug("%s skipped (nr_reqs %d, done %d, skip %d)",
1012 __func__
, sb_dfr
->nr_chunks
- 1, done
, skip
);
1014 /* Make dfr_reserved_clus zero */
1015 if (sbi
->dfr_reserved_clus
> 0) {
1016 if (fsi
->reserved_clusters
< sbi
->dfr_reserved_clus
) {
1017 dfr_err("Reserved count: reserved_clus %d, dfr_reserved_clus %d",
1018 fsi
->reserved_clusters
, sbi
->dfr_reserved_clus
);
1019 BUG_ON(fsi
->reserved_clusters
< sbi
->dfr_reserved_clus
);
1022 defrag_reserve_clusters(sb
, 0 - sbi
->dfr_reserved_clus
);
1025 dfr_debug("%s done (nr_reqs %d, done %d)", __func__
, sb_dfr
->nr_chunks
- 1, done
);
1031 * @fn defrag_update_fat_next
1032 * @brief update FAT chain for defrag requests
1034 * @param sb super block
1035 * @remark protected by super_block and volume lock
1038 defrag_update_fat_next(
1039 struct super_block
*sb
)
1041 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
1042 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
1043 struct defrag_info
*sb_dfr
= &sbi
->dfr_info
, *ino_dfr
= NULL
;
1044 struct defrag_chunk_info
*chunk
= NULL
;
1045 int done
= 0, i
= 0, j
= 0, err
= 0;
1047 /* Check if FS_ERROR occurred */
1048 if (sb
->s_flags
& MS_RDONLY
) {
1049 dfr_err("RDONLY partition (err %d)", -EROFS
);
1053 list_for_each_entry(ino_dfr
, &sb_dfr
->entry
, entry
) {
1055 for (i
= 0; i
< ino_dfr
->nr_chunks
; i
++) {
1058 chunk
= &(ino_dfr
->chunks
[i
]);
1060 /* Do nothing if error occurred or update_fat_next already passed */
1061 if (chunk
->stat
== DFR_CHUNK_STAT_ERR
)
1063 if (chunk
->stat
& DFR_CHUNK_STAT_FAT
) {
1068 /* Ship this chunk if get_block not passed for this chunk */
1069 for (j
= 0; j
< chunk
->nr_clus
; j
++) {
1070 if (sbi
->dfr_new_clus
[chunk
->new_idx
+ j
] == 0) {
1078 /* Update chunk's next cluster */
1079 FAT32_CHECK_CLUSTER(fsi
,
1080 sbi
->dfr_new_clus
[chunk
->new_idx
+ chunk
->nr_clus
- 1], err
);
1083 sbi
->dfr_new_clus
[chunk
->new_idx
+ chunk
->nr_clus
- 1],
1087 #ifdef CONFIG_SDFAT_DFR_DEBUG
1089 defrag_spo_test(sb
, DFR_SPO_RANDOM
, __func__
);
1092 /* Update chunk's state */
1093 chunk
->stat
|= DFR_CHUNK_STAT_FAT
;
1099 dfr_debug("%s done (nr_reqs %d, done %d)", __func__
, sb_dfr
->nr_chunks
- 1, done
);
1104 * @fn defrag_check_discard
1105 * @brief check if we can send discard for this AU, if so, send discard
1107 * @param sb super block
1108 * @remark protected by super_block and volume lock
1111 defrag_check_discard(
1112 IN
struct super_block
*sb
)
1114 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
1115 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
1116 AU_INFO_T
*au
= NULL
;
1117 struct defrag_info
*sb_dfr
= &(SDFAT_SB(sb
)->dfr_info
);
1118 unsigned int tmp
[DFR_MAX_AU_MOVED
];
1123 if (!(SDFAT_SB(sb
)->options
.discard
) ||
1124 !(SDFAT_SB(sb
)->options
.improved_allocation
& SDFAT_ALLOC_SMART
))
1127 memset(tmp
, 0, sizeof(int) * DFR_MAX_AU_MOVED
);
1129 for (i
= REQ_HEADER_IDX
+ 1; i
< sb_dfr
->nr_chunks
; i
++) {
1130 struct defrag_chunk_info
*chunk
= &(sb_dfr
->chunks
[i
]);
1133 au
= GET_AU(amap
, i_AU_of_CLU(amap
, chunk
->d_clus
));
1135 /* Send DISCARD for free AU */
1136 if ((IS_AU_IGNORED(au
, amap
)) &&
1137 (amap_get_freeclus(sb
, chunk
->d_clus
) == CLUS_PER_AU(sb
))) {
1138 sector_t blk
= 0, nr_blks
= 0;
1139 unsigned int au_align_factor
= amap
->option
.au_align_factor
% amap
->option
.au_size
;
1141 BUG_ON(au
->idx
== 0);
1143 /* Avoid multiple DISCARD */
1144 for (j
= 0; j
< DFR_MAX_AU_MOVED
; j
++) {
1145 if (tmp
[j
] == au
->idx
) {
1153 /* Send DISCARD cmd */
1154 blk
= (sector_t
) (((au
->idx
* CLUS_PER_AU(sb
)) << fsi
->sect_per_clus_bits
)
1156 nr_blks
= ((sector_t
)CLUS_PER_AU(sb
)) << fsi
->sect_per_clus_bits
;
1158 dfr_debug("Send DISCARD for AU[%d] (blk %08zx)", au
->idx
, blk
);
1159 sb_issue_discard(sb
, blk
, nr_blks
, GFP_NOFS
, 0);
1161 /* Save previous AU's index */
1162 for (j
= 0; j
< DFR_MAX_AU_MOVED
; j
++) {
1174 * @fn defrag_free_cluster
1175 * @brief free uneccessary cluster
1177 * @param sb super block
1178 * @param clus physical cluster num
1179 * @remark protected by super_block and volume lock
1182 defrag_free_cluster(
1183 struct super_block
*sb
,
1186 FS_INFO_T
*fsi
= &SDFAT_SB(sb
)->fsi
;
1187 unsigned int val
= 0;
1190 FAT32_CHECK_CLUSTER(fsi
, clus
, err
);
1192 if (fat_ent_get(sb
, clus
, &val
))
1195 if (fat_ent_set(sb
, clus
, 0))
1198 dfr_err("Free: Already freed, clus %08x, val %08x", clus
, val
);
1203 fsi
->used_clusters
--;
1205 amap_release_cluster(sb
, clus
);
1212 * @fn defrag_check_defrag_required
1213 * @brief check if defrag required
1214 * @return 1 if required, 0 otherwise
1215 * @param sb super block
1216 * @param totalau # of total AUs
1217 * @param cleanau # of clean AUs
1218 * @param fullau # of full AUs
1219 * @remark protected by super_block
1222 defrag_check_defrag_required(
1223 IN
struct super_block
*sb
,
1228 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
1229 AMAP_T
*amap
= NULL
;
1230 int clean_ratio
= 0, frag_ratio
= 0;
1233 if (!sb
|| !(SDFAT_SB(sb
)->options
.defrag
))
1236 /* Check DFR_DEFAULT_STOP_RATIO first */
1237 fsi
= &(SDFAT_SB(sb
)->fsi
);
1238 if (fsi
->used_clusters
== (unsigned int)(~0)) {
1239 if (fsi
->fs_func
->count_used_clusters(sb
, &fsi
->used_clusters
))
1242 if (fsi
->used_clusters
* DFR_FULL_RATIO
>= fsi
->num_clusters
* DFR_DEFAULT_STOP_RATIO
) {
1243 dfr_debug("used_clusters %d, num_clusters %d", fsi
->used_clusters
, fsi
->num_clusters
);
1247 /* Check clean/frag ratio */
1248 amap
= SDFAT_SB(sb
)->fsi
.amap
;
1251 clean_ratio
= (amap
->n_clean_au
* 100) / amap
->n_au
;
1252 if (amap
->n_full_au
)
1253 frag_ratio
= ((amap
->n_au
- amap
->n_clean_au
) * 100) / amap
->n_full_au
;
1255 frag_ratio
= ((amap
->n_au
- amap
->n_clean_au
) * 100) /
1256 (fsi
->used_clusters
* CLUS_PER_AU(sb
));
1259 * Wake-up defrag_daemon:
1260 * when # of clean AUs too small, or frag_ratio exceeds the limit
1262 if ((clean_ratio
< DFR_DEFAULT_WAKEUP_RATIO
) ||
1263 ((clean_ratio
< DFR_DEFAULT_CLEAN_RATIO
) && (frag_ratio
>= DFR_DEFAULT_FRAG_RATIO
))) {
1266 *totalau
= amap
->n_au
;
1268 *cleanau
= amap
->n_clean_au
;
1270 *fullau
= amap
->n_full_au
;
1279 * @fn defrag_check_defrag_required
1280 * @brief check defrag status on inode
1281 * @return 1 if defrag in on, 0 otherwise
1282 * @param inode inode
1283 * @param start logical start addr
1284 * @param end logical end addr
1285 * @param cancel flag to cancel defrag
1286 * @param caller caller info
1289 defrag_check_defrag_on(
1290 INOUT
struct inode
*inode
,
1294 IN
const char *caller
)
1296 struct super_block
*sb
= inode
->i_sb
;
1297 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
1298 FS_INFO_T
*fsi
= &(sbi
->fsi
);
1299 struct defrag_info
*ino_dfr
= &(SDFAT_I(inode
)->dfr_info
);
1300 unsigned int clus_start
= 0, clus_end
= 0;
1303 if (!inode
|| (start
== end
))
1306 mutex_lock(&ino_dfr
->lock
);
1307 /* Check if this inode is on defrag */
1308 if (atomic_read(&ino_dfr
->stat
) == DFR_INO_STAT_REQ
) {
1310 clus_start
= start
>> (fsi
->cluster_size_bits
);
1311 clus_end
= (end
>> (fsi
->cluster_size_bits
)) +
1312 ((end
& (fsi
->cluster_size
- 1)) ? 1 : 0);
1314 if (!ino_dfr
->chunks
)
1317 /* Check each chunk in given inode */
1318 for (i
= 0; i
< ino_dfr
->nr_chunks
; i
++) {
1319 struct defrag_chunk_info
*chunk
= &(ino_dfr
->chunks
[i
]);
1320 unsigned int chunk_start
= 0, chunk_end
= 0;
1322 /* Skip this chunk when error occurred or it already passed defrag process */
1323 if ((chunk
->stat
== DFR_CHUNK_STAT_ERR
) || (chunk
->stat
== DFR_CHUNK_STAT_PASS
))
1326 chunk_start
= chunk
->f_clus
;
1327 chunk_end
= chunk
->f_clus
+ chunk
->nr_clus
;
1329 if (((clus_start
>= chunk_start
) && (clus_start
< chunk_end
)) ||
1330 ((clus_end
> chunk_start
) && (clus_end
<= chunk_end
)) ||
1331 ((clus_start
< chunk_start
) && (clus_end
> chunk_end
))) {
1334 chunk
->stat
= DFR_CHUNK_STAT_ERR
;
1335 dfr_debug("Defrag canceled: inode %p, start %08x, end %08x, caller %s",
1336 inode
, clus_start
, clus_end
, caller
);
1343 BUG_ON(!mutex_is_locked(&ino_dfr
->lock
));
1344 mutex_unlock(&ino_dfr
->lock
);
1349 #ifdef CONFIG_SDFAT_DFR_DEBUG
1351 * @fn defrag_spo_test
1352 * @brief test SPO while defrag running
1354 * @param sb super block
1355 * @param flag SPO debug flag
1356 * @param caller caller info
1360 struct super_block
*sb
,
1364 struct sdfat_sb_info
*sbi
= SDFAT_SB(sb
);
1366 if (!sb
|| !(SDFAT_SB(sb
)->options
.defrag
))
1369 if (flag
== sbi
->dfr_spo_flag
) {
1370 dfr_err("Defrag SPO test (flag %d, caller %s)", flag
, caller
);
1371 panic("Defrag SPO test");
1374 #endif /* CONFIG_SDFAT_DFR_DEBUG */
1377 #endif /* CONFIG_SDFAT_DFR */