fs: Add sdfat
fs/sdfat/dfr.c
/*
 *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version 2
 *  of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/************************************************************************/
/*                                                                      */
/*  @PROJECT : exFAT & FAT12/16/32 File System                          */
/*  @FILE    : dfr.c                                                    */
/*  @PURPOSE : Defragmentation support for SDFAT32                      */
/*                                                                      */
/*----------------------------------------------------------------------*/
/*  NOTES                                                               */
/*                                                                      */
/*                                                                      */
/************************************************************************/

#include <linux/version.h>
#include <linux/list.h>
#include <linux/blkdev.h>

#include "sdfat.h"
#include "core.h"
#include "amap_smart.h"

#ifdef CONFIG_SDFAT_DFR
/**
 * @fn		defrag_get_info
 * @brief	get HW params for defrag daemon
 * @return	0 on success, -errno otherwise
 * @param	sb	super block
 * @param	arg	defrag info arguments
 * @remark	protected by super_block
 */
int
defrag_get_info(
	IN struct super_block *sb,
	OUT struct defrag_info_arg *arg)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;

	if (!arg)
		return -EINVAL;

	arg->sec_sz = sb->s_blocksize;
	arg->clus_sz = fsi->cluster_size;
	arg->total_sec = fsi->num_sectors;
	arg->fat_offset_sec = fsi->FAT1_start_sector;
	arg->fat_sz_sec = fsi->num_FAT_sectors;
	arg->n_fat = (fsi->FAT1_start_sector == fsi->FAT2_start_sector) ? 1 : 2;

	arg->sec_per_au = amap->option.au_size;
	arg->hidden_sectors = amap->option.au_align_factor % amap->option.au_size;

	return 0;
}
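
/*
 * Illustrative userspace sketch (not part of this driver): the defrag
 * daemon is expected to fetch these geometry parameters once through the
 * sdfat defrag ioctl and size its work units from them. The ioctl request
 * name below is hypothetical; the real request code is defined by the
 * sdfat ioctl interface, not in this file.
 *
 *	struct defrag_info_arg info;
 *
 *	if (ioctl(fd, SDFAT_IOC_DFR_INFO, &info) == 0) {
 *		unsigned int sec_per_clus = info.clus_sz / info.sec_sz;
 *		unsigned int clus_per_au = info.sec_per_au / sec_per_clus;
 *		// clus_per_au bounds the size of one defrag chunk
 *	}
 */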


static int
__defrag_scan_dir(
	IN struct super_block *sb,
	IN DOS_DENTRY_T *dos_ep,
	IN loff_t i_pos,
	OUT struct defrag_trav_arg *arg)
{
	FS_INFO_T *fsi = NULL;
	UNI_NAME_T uniname;
	unsigned int type = 0, start_clus = 0;
	int err = -EPERM;

	/* Check params */
	ERR_HANDLE2((!sb || !dos_ep || !i_pos || !arg), err, -EINVAL);
	fsi = &(SDFAT_SB(sb)->fsi);

	/* Get given entry's type */
	type = fsi->fs_func->get_entry_type((DENTRY_T *) dos_ep);

	/* Check dos_ep */
	if (!strncmp(dos_ep->name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) {
		;
	} else if (!strncmp(dos_ep->name, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH)) {
		;
	} else if ((type == TYPE_DIR) || (type == TYPE_FILE)) {

		/* Set start_clus */
		SET32_HI(start_clus, le16_to_cpu(dos_ep->start_clu_hi));
		SET32_LO(start_clus, le16_to_cpu(dos_ep->start_clu_lo));
		arg->start_clus = start_clus;

		/* Set type & i_pos */
		if (type == TYPE_DIR)
			arg->type = DFR_TRAV_TYPE_DIR;
		else
			arg->type = DFR_TRAV_TYPE_FILE;

		arg->i_pos = i_pos;

		/* Set name */
		memset(&uniname, 0, sizeof(UNI_NAME_T));
		get_uniname_from_dos_entry(sb, dos_ep, &uniname, 0x1);
		/* FIXME :
		 * we should consider whether the size of arg->name
		 * is sufficient or not
		 */
		nls_uni16s_to_vfsname(sb, &uniname,
				arg->name, sizeof(arg->name));

		err = 0;
	/* End case */
	} else if (type == TYPE_UNUSED) {
		err = -ENOENT;
	} else {
		;
	}

error:
	return err;
}
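
/*
 * A worked example of the i_pos encoding used above and throughout this
 * file: i_pos packs the parent directory's start cluster into the upper
 * 32 bits and the dentry offset within that directory into the lower
 * 32 bits.
 *
 *	loff_t i_pos = 0;
 *
 *	SET64_HI(i_pos, 0x321);	// parent dir starts at cluster 0x321
 *	SET64_LO(i_pos, 5);	// 6th dentry in that directory
 *	// i_pos == 0x0000032100000005ULL
 *	// GET64_HI(i_pos) / GET64_LO(i_pos) recover both halves later
 */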


/**
 * @fn		defrag_scan_dir
 * @brief	scan given directory
 * @return	0 on success, -errno otherwise
 * @param	sb	super block
 * @param	args	traverse args
 * @remark	protected by inode_lock, super_block and volume lock
 */
int
defrag_scan_dir(
	IN struct super_block *sb,
	INOUT struct defrag_trav_arg *args)
{
	struct sdfat_sb_info *sbi = NULL;
	FS_INFO_T *fsi = NULL;
	struct defrag_trav_header *header = NULL;
	DOS_DENTRY_T *dos_ep;
	CHAIN_T chain;
	int dot_found = 0, args_idx = DFR_TRAV_HEADER_IDX + 1, clus = 0, index = 0;
	int err = 0, j = 0;

	/* Check params */
	ERR_HANDLE2((!sb || !args), err, -EINVAL);
	sbi = SDFAT_SB(sb);
	fsi = &(sbi->fsi);
	header = (struct defrag_trav_header *) args;

	/* Exceptional case for ROOT */
	if (header->i_pos == DFR_TRAV_ROOT_IPOS) {
		header->start_clus = fsi->root_dir;
		dfr_debug("IOC_DFR_TRAV for ROOT: start_clus %08x", header->start_clus);
		dot_found = 1;
	}

	chain.dir = header->start_clus;
	chain.size = 0;
	chain.flags = 0;

	/* Check if this is directory */
	if (!dot_found) {
		FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
		ERR_HANDLE(err);
		dos_ep = (DOS_DENTRY_T *) get_dentry_in_dir(sb, &chain, 0, NULL);
		ERR_HANDLE2(!dos_ep, err, -EIO);

		if (strncmp(dos_ep->name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) {
			err = -EINVAL;
			dfr_err("Scan: Not a directory, err %d", err);
			goto error;
		}
	}

	/* For more-scan case */
	if ((header->stat == DFR_TRAV_STAT_MORE) &&
		(header->start_clus == sbi->dfr_hint_clus) &&
		(sbi->dfr_hint_idx > 0)) {

		index = sbi->dfr_hint_idx;
		for (j = 0; j < (sbi->dfr_hint_idx / fsi->dentries_per_clu); j++) {
			/* Follow FAT-chain */
			FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
			ERR_HANDLE(err);
			err = fat_ent_get(sb, chain.dir, &(chain.dir));
			ERR_HANDLE(err);

			if (!IS_CLUS_EOF(chain.dir)) {
				clus++;
				index -= fsi->dentries_per_clu;
			} else {
				/**
				 * This directory was modified. Stop scanning.
				 */
				err = -EINVAL;
				dfr_err("Scan: SCAN_MORE failed, err %d", err);
				goto error;
			}
		}

	/* For first-scan case */
	} else {
		clus = 0;
		index = 0;
	}

scan_fat_chain:
	/* Scan given directory and get info of children */
	for ( ; index < fsi->dentries_per_clu; index++) {
		DOS_DENTRY_T *dos_ep = NULL;
		loff_t i_pos = 0;

		/* Get dos_ep */
		FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
		ERR_HANDLE(err);
		dos_ep = (DOS_DENTRY_T *) get_dentry_in_dir(sb, &chain, index, NULL);
		ERR_HANDLE2(!dos_ep, err, -EIO);

		/* Make i_pos for this entry */
		SET64_HI(i_pos, header->start_clus);
		SET64_LO(i_pos, clus * fsi->dentries_per_clu + index);

		err = __defrag_scan_dir(sb, dos_ep, i_pos, &args[args_idx]);
		if (!err) {
			/* More-scan case */
			if (++args_idx >= (PAGE_SIZE / sizeof(struct defrag_trav_arg))) {
				sbi->dfr_hint_clus = header->start_clus;
				sbi->dfr_hint_idx = clus * fsi->dentries_per_clu + index + 1;

				header->stat = DFR_TRAV_STAT_MORE;
				header->nr_entries = args_idx;
				goto error;
			}
		/* Error case */
		} else if (err == -EINVAL) {
			sbi->dfr_hint_clus = sbi->dfr_hint_idx = 0;
			dfr_err("Scan: err %d", err);
			goto error;
		/* End case */
		} else if (err == -ENOENT) {
			sbi->dfr_hint_clus = sbi->dfr_hint_idx = 0;
			err = 0;
			goto done;
		} else {
			/* DO NOTHING */
		}
		err = 0;
	}

	/* Follow FAT-chain */
	FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
	ERR_HANDLE(err);
	err = fat_ent_get(sb, chain.dir, &(chain.dir));
	ERR_HANDLE(err);

	if (!IS_CLUS_EOF(chain.dir)) {
		index = 0;
		clus++;
		goto scan_fat_chain;
	}

done:
	/* Update header */
	header->stat = DFR_TRAV_STAT_DONE;
	header->nr_entries = args_idx;

error:
	return err;
}
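
/*
 * Example of the more-scan handoff above (sizes are illustrative): with
 * 4 KiB pages and, say, a 64-byte struct defrag_trav_arg, one traversal
 * buffer holds PAGE_SIZE / sizeof(struct defrag_trav_arg) == 64 slots,
 * header included. A directory with more children than that makes the
 * scan stop with stat == DFR_TRAV_STAT_MORE and remember the next dentry
 * index in sbi->dfr_hint_idx; the daemon then repeats the ioctl with the
 * same start_clus and the scan resumes from the hint instead of dentry 0.
 */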


static int
__defrag_validate_cluster_prev(
	IN struct super_block *sb,
	IN struct defrag_chunk_info *chunk)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	CHAIN_T dir;
	DENTRY_T *ep = NULL;
	unsigned int entry = 0, clus = 0;
	int err = 0;

	if (chunk->prev_clus == 0) {
		/* For the first cluster of a file */
		dir.dir = GET64_HI(chunk->i_pos);
		dir.flags = 0x1;	/* Assume non-continuous */

		entry = GET64_LO(chunk->i_pos);

		FAT32_CHECK_CLUSTER(fsi, dir.dir, err);
		ERR_HANDLE(err);
		ep = get_dentry_in_dir(sb, &dir, entry, NULL);
		if (!ep) {
			err = -EPERM;
			goto error;
		}

		/* should call fat_get_entry_clu0(ep) */
		clus = fsi->fs_func->get_entry_clu0(ep);
		if (clus != chunk->d_clus) {
			err = -ENXIO;
			goto error;
		}
	} else {
		/* Normal case */
		FAT32_CHECK_CLUSTER(fsi, chunk->prev_clus, err);
		ERR_HANDLE(err);
		err = fat_ent_get(sb, chunk->prev_clus, &clus);
		if (err)
			goto error;
		if (chunk->d_clus != clus)
			err = -ENXIO;
	}

error:
	return err;
}


static int
__defrag_validate_cluster_next(
	IN struct super_block *sb,
	IN struct defrag_chunk_info *chunk)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	unsigned int clus = 0;
	int err = 0;

	/* Check next_clus */
	FAT32_CHECK_CLUSTER(fsi, (chunk->d_clus + chunk->nr_clus - 1), err);
	ERR_HANDLE(err);
	err = fat_ent_get(sb, (chunk->d_clus + chunk->nr_clus - 1), &clus);
	if (err)
		goto error;
	if (chunk->next_clus != (clus & FAT32_EOF))
		err = -ENXIO;

error:
	return err;
}


/**
 * @fn		__defrag_check_au
 * @brief	check if this AU is in use
 * @return	0 if idle, 1 if busy
 * @param	sb	super block
 * @param	clus	physical cluster num
 * @param	limit	minimum # of free clusters (AU is busy below this),
 *			derived from the daemon's used-cluster snapshot
 */
static int
__defrag_check_au(
	struct super_block *sb,
	u32 clus,
	u32 limit)
{
	unsigned int nr_free = amap_get_freeclus(sb, clus);

#if defined(CONFIG_SDFAT_DFR_DEBUG) && defined(CONFIG_SDFAT_DBG_MSG)
	if (nr_free < limit) {
		AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
		AU_INFO_T *au = GET_AU(amap, i_AU_of_CLU(amap, clus));

		dfr_debug("AU[%d] nr_free %d, limit %d", au->idx, nr_free, limit);
	}
#endif
	return ((nr_free < limit) ? 1 : 0);
}
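
/*
 * Example: if CLUS_PER_AU(sb) == 64 and the daemon saw au_clus == 60
 * clusters of an AU in use at scan time, the caller below passes
 * limit == 64 - 60 == 4. The AU is then reported busy (1) when fewer
 * than 4 of its clusters remain free, i.e. when someone has consumed
 * space in that AU since the daemon's snapshot.
 */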


/**
 * @fn		defrag_validate_cluster
 * @brief	validate cluster info of given chunk
 * @return	0 on success, -errno otherwise
 * @param	inode	inode of given chunk
 * @param	chunk	given chunk
 * @param	skip_prev	flag to skip checking previous cluster info
 * @remark	protected by super_block and volume lock
 */
int
defrag_validate_cluster(
	IN struct inode *inode,
	IN struct defrag_chunk_info *chunk,
	IN int skip_prev)
{
	struct super_block *sb = inode->i_sb;
	FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
	unsigned int clus = 0;
	int err = 0, i = 0;

	/* If this inode is unlink-ed, skip it */
	if (fid->dir.dir == DIR_DELETED)
		return -ENOENT;

	/* Skip working-AU */
	err = amap_check_working(sb, chunk->d_clus);
	if (err)
		return -EBUSY;

	/* Check # of free_clus of belonged AU */
	err = __defrag_check_au(inode->i_sb, chunk->d_clus, CLUS_PER_AU(sb) - chunk->au_clus);
	if (err)
		return -EINVAL;

	/* Check chunk's clusters */
	for (i = 0; i < chunk->nr_clus; i++) {
		err = fsapi_map_clus(inode, chunk->f_clus + i, &clus, ALLOC_NOWHERE);
		if (err || (chunk->d_clus + i != clus)) {
			if (!err)
				err = -ENXIO;
			goto error;
		}
	}

	/* Check next_clus */
	err = __defrag_validate_cluster_next(sb, chunk);
	ERR_HANDLE(err);

	if (!skip_prev) {
		/* Check prev_clus */
		err = __defrag_validate_cluster_prev(sb, chunk);
		ERR_HANDLE(err);
	}

error:
	return err;
}


/**
 * @fn		defrag_reserve_clusters
 * @brief	reserve clusters for defrag
 * @return	0 on success, -errno otherwise
 * @param	sb	super block
 * @param	nr_clus	# of clusters to reserve
 * @remark	protected by super_block and volume lock
 */
int
defrag_reserve_clusters(
	INOUT struct super_block *sb,
	IN int nr_clus)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &(sbi->fsi);

	if (!(sbi->options.improved_allocation & SDFAT_ALLOC_DELAY))
		/* Nothing to do */
		return 0;

	/* Update used_clusters */
	if (fsi->used_clusters == (u32) ~0) {
		if (fsi->fs_func->count_used_clusters(sb, &fsi->used_clusters))
			return -EIO;
	}

	/* Check error case */
	if (fsi->used_clusters + fsi->reserved_clusters + nr_clus >= fsi->num_clusters - 2) {
		return -ENOSPC;
	} else if (fsi->reserved_clusters + nr_clus < 0) {
		dfr_err("Reserve count: reserved_clusters %d, nr_clus %d",
				fsi->reserved_clusters, nr_clus);
		BUG_ON(fsi->reserved_clusters + nr_clus < 0);
	}

	sbi->dfr_reserved_clus += nr_clus;
	fsi->reserved_clusters += nr_clus;

	return 0;
}
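
/*
 * Worked example of the -ENOSPC guard above (illustrative numbers): on
 * a volume with num_clusters == 1000, used_clusters == 900 and
 * reserved_clusters == 50, reserving nr_clus == 50 fails because
 * 900 + 50 + 50 >= 1000 - 2, while nr_clus == 40 succeeds and bumps
 * both reservation counters by 40. Callers release by passing a
 * negative nr_clus, as defrag_update_fat_prev() does on completion.
 */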


/**
 * @fn		defrag_mark_ignore
 * @brief	mark corresponding AU to be ignored
 * @return	0 on success, -errno otherwise
 * @param	sb	super block
 * @param	clus	given cluster num
 * @remark	protected by super_block
 */
int
defrag_mark_ignore(
	INOUT struct super_block *sb,
	IN unsigned int clus)
{
	int err = 0;

	if (SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART)
		err = amap_mark_ignore(sb, clus);

	if (err)
		dfr_debug("err %d", err);
	return err;
}


/**
 * @fn		defrag_unmark_ignore_all
 * @brief	unmark all ignored AUs
 * @return	void
 * @param	sb	super block
 * @remark	protected by super_block
 */
void
defrag_unmark_ignore_all(struct super_block *sb)
{
	if (SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART)
		amap_unmark_ignore_all(sb);
}


/**
 * @fn		defrag_map_cluster
 * @brief	get_block function for defrag dests
 * @return	0 on success, -errno otherwise
 * @param	inode	inode
 * @param	clu_offset	logical cluster offset
 * @param	clu	mapped cluster (physical)
 * @remark	protected by super_block and volume lock
 */
int
defrag_map_cluster(
	struct inode *inode,
	unsigned int clu_offset,
	unsigned int *clu)
{
	struct super_block *sb = inode->i_sb;
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
#ifdef CONFIG_SDFAT_DFR_PACKING
	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
#endif
	FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
	struct defrag_info *ino_dfr = &(SDFAT_I(inode)->dfr_info);
	struct defrag_chunk_info *chunk = NULL;
	CHAIN_T new_clu;
	int num = 0, i = 0, nr_new = 0, err = 0;

	/* Get corresponding chunk */
	for (i = 0; i < ino_dfr->nr_chunks; i++) {
		chunk = &(ino_dfr->chunks[i]);

		if ((chunk->f_clus <= clu_offset) && (clu_offset < chunk->f_clus + chunk->nr_clus)) {
			/* For already allocated new_clus */
			if (sbi->dfr_new_clus[chunk->new_idx + clu_offset - chunk->f_clus]) {
				*clu = sbi->dfr_new_clus[chunk->new_idx + clu_offset - chunk->f_clus];
				return 0;
			}
			break;
		}
	}
	BUG_ON(!chunk);

	fscore_set_vol_flags(sb, VOL_DIRTY, 0);

	new_clu.dir = CLUS_EOF;
	new_clu.size = 0;
	new_clu.flags = fid->flags;

	/* Allocate new cluster */
#ifdef CONFIG_SDFAT_DFR_PACKING
	if (amap->n_clean_au * DFR_FULL_RATIO <= amap->n_au * DFR_DEFAULT_PACKING_RATIO)
		num = fsi->fs_func->alloc_cluster(sb, 1, &new_clu, ALLOC_COLD_PACKING);
	else
		num = fsi->fs_func->alloc_cluster(sb, 1, &new_clu, ALLOC_COLD_ALIGNED);
#else
	num = fsi->fs_func->alloc_cluster(sb, 1, &new_clu, ALLOC_COLD_ALIGNED);
#endif

	if (num != 1) {
		dfr_err("Map: num %d", num);
		return -EIO;
	}

	/* Decrease reserved cluster count */
	defrag_reserve_clusters(sb, -1);

	/* Add new_clus info in ino_dfr */
	sbi->dfr_new_clus[chunk->new_idx + clu_offset - chunk->f_clus] = new_clu.dir;

	/* Make FAT-chain for new_clus */
	for (i = 0; i < chunk->nr_clus; i++) {
		if (!sbi->dfr_new_clus[chunk->new_idx + i])
			break;
		nr_new++;
	}
	if (nr_new == chunk->nr_clus) {
		for (i = 0; i < chunk->nr_clus - 1; i++) {
			FAT32_CHECK_CLUSTER(fsi, sbi->dfr_new_clus[chunk->new_idx + i], err);
			BUG_ON(err);
			if (fat_ent_set(sb,
					sbi->dfr_new_clus[chunk->new_idx + i],
					sbi->dfr_new_clus[chunk->new_idx + i + 1]))
				return -EIO;
		}
	}

	*clu = new_clu.dir;
	return 0;
}
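
/*
 * Example of the chain-linking step above: for a chunk with
 * nr_clus == 3 whose destination clusters were allocated as 0x120,
 * 0x180 and 0x1C0 (stored at dfr_new_clus[new_idx..new_idx + 2]), the
 * loop issues fat_ent_set(sb, 0x120, 0x180) and
 * fat_ent_set(sb, 0x180, 0x1C0). The final link from 0x1C0 to
 * chunk->next_clus is written later, by defrag_update_fat_next().
 */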


/**
 * @fn		defrag_writepage_end_io
 * @brief	check WB status of requested page
 * @return	void
 * @param	page	page
 */
void
defrag_writepage_end_io(
	INOUT struct page *page)
{
	struct super_block *sb = page->mapping->host->i_sb;
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	struct defrag_info *ino_dfr = &(SDFAT_I(page->mapping->host)->dfr_info);
	unsigned int clus_start = 0, clus_end = 0;
	int i = 0;

	/* Check if this inode is on defrag */
	if (atomic_read(&ino_dfr->stat) != DFR_INO_STAT_REQ)
		return;

	clus_start = page->index / PAGES_PER_CLUS(sb);
	clus_end = clus_start + 1;

	/* Check each chunk in given inode */
	for (i = 0; i < ino_dfr->nr_chunks; i++) {
		struct defrag_chunk_info *chunk = &(ino_dfr->chunks[i]);
		unsigned int chunk_start = 0, chunk_end = 0;

		chunk_start = chunk->f_clus;
		chunk_end = chunk->f_clus + chunk->nr_clus;

		if ((clus_start >= chunk_start) && (clus_end <= chunk_end)) {
			int off = clus_start - chunk_start;

			clear_bit((page->index & (PAGES_PER_CLUS(sb) - 1)),
					(volatile unsigned long *)&(sbi->dfr_page_wb[chunk->new_idx + off]));
		}
	}
}
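
/*
 * Example of the write-back bookkeeping above: with 4 KiB pages and a
 * 16 KiB cluster, PAGES_PER_CLUS(sb) == 4, so a completed write on
 * page->index == 9 belongs to logical cluster 9 / 4 == 2 and clears
 * bit (9 & 3) == 1 in the dfr_page_wb slot of the chunk covering that
 * cluster; __defrag_check_wb() later treats an all-zero slot as
 * "write-back done" for that cluster.
 */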


/**
 * @fn		__defrag_check_wb
 * @brief	check if WB for given chunk completed
 * @return	0 on success, -errno otherwise
 * @param	sbi	super block info
 * @param	chunk	given chunk
 */
static int
__defrag_check_wb(
	IN struct sdfat_sb_info *sbi,
	IN struct defrag_chunk_info *chunk)
{
	int err = 0, wb_i = 0, i = 0, nr_new = 0;

	if (!sbi || !chunk)
		return -EINVAL;

	/* Check WB complete status first */
	for (wb_i = 0; wb_i < chunk->nr_clus; wb_i++) {
		if (atomic_read((atomic_t *)&(sbi->dfr_page_wb[chunk->new_idx + wb_i]))) {
			err = -EBUSY;
			break;
		}
	}

	/**
	 * Check NEW_CLUS status.
	 * writepage_end_io cannot check whole WB complete status,
	 * so we need to check NEW_CLUS status.
	 */
	for (i = 0; i < chunk->nr_clus; i++)
		if (sbi->dfr_new_clus[chunk->new_idx + i])
			nr_new++;

	if (nr_new == chunk->nr_clus) {
		err = 0;
		if ((wb_i != chunk->nr_clus) && (wb_i != chunk->nr_clus - 1))
			dfr_debug("submit_fullpage_bio() called on a page (nr_clus %d, wb_i %d)",
					chunk->nr_clus, wb_i);

		BUG_ON(nr_new > chunk->nr_clus);
	} else {
		dfr_debug("nr_new %d, nr_clus %d", nr_new, chunk->nr_clus);
		err = -EBUSY;
	}

	/* Update chunk's state */
	if (!err)
		chunk->stat |= DFR_CHUNK_STAT_WB;

	return err;
}


static void
__defrag_check_fat_old(
	IN struct super_block *sb,
	IN struct inode *inode,
	IN struct defrag_chunk_info *chunk)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	unsigned int clus = 0;
	int err = 0, idx = 0, max_idx = 0;

	/* Get start_clus */
	clus = SDFAT_I(inode)->fid.start_clu;

	/* Follow FAT-chain */
#define num_clusters(val) ((val) ? (s32)(((val) - 1) >> fsi->cluster_size_bits) + 1 : 0)
	max_idx = num_clusters(SDFAT_I(inode)->i_size_ondisk);
	for (idx = 0; idx < max_idx; idx++) {

		FAT32_CHECK_CLUSTER(fsi, clus, err);
		ERR_HANDLE(err);
		err = fat_ent_get(sb, clus, &clus);
		ERR_HANDLE(err);

		if ((idx < max_idx - 1) && (IS_CLUS_EOF(clus) || IS_CLUS_FREE(clus))) {
			dfr_err("FAT: inode %p, max_idx %d, idx %d, clus %08x, "
				"f_clus %d, nr_clus %d", inode, max_idx,
				idx, clus, chunk->f_clus, chunk->nr_clus);
			BUG_ON(idx < max_idx - 1);
			goto error;
		}
	}

error:
	return;
}


static void
__defrag_check_fat_new(
	IN struct super_block *sb,
	IN struct inode *inode,
	IN struct defrag_chunk_info *chunk)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	unsigned int clus = 0;
	int i = 0, err = 0;

	/* Check start of FAT-chain */
	if (chunk->prev_clus) {
		FAT32_CHECK_CLUSTER(fsi, chunk->prev_clus, err);
		BUG_ON(err);
		err = fat_ent_get(sb, chunk->prev_clus, &clus);
		BUG_ON(err);
	} else {
		clus = SDFAT_I(inode)->fid.start_clu;
	}
	if (sbi->dfr_new_clus[chunk->new_idx] != clus) {
		dfr_err("FAT: inode %p, start_clus %08x, read_clus %08x",
				inode, sbi->dfr_new_clus[chunk->new_idx], clus);
		err = -EIO;
		goto error;
	}

	/* Check inside of FAT-chain */
	if (chunk->nr_clus > 1) {
		for (i = 0; i < chunk->nr_clus - 1; i++) {
			FAT32_CHECK_CLUSTER(fsi, sbi->dfr_new_clus[chunk->new_idx + i], err);
			BUG_ON(err);
			err = fat_ent_get(sb, sbi->dfr_new_clus[chunk->new_idx + i], &clus);
			BUG_ON(err);
			if (sbi->dfr_new_clus[chunk->new_idx + i + 1] != clus) {
				dfr_err("FAT: inode %p, new_clus %08x, read_clus %08x",
						inode, sbi->dfr_new_clus[chunk->new_idx], clus);
				err = -EIO;
				goto error;
			}
		}
		clus = 0;
	}

	/* Check end of FAT-chain */
	FAT32_CHECK_CLUSTER(fsi, sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], err);
	BUG_ON(err);
	err = fat_ent_get(sb, sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], &clus);
	BUG_ON(err);
	if ((chunk->next_clus & 0x0FFFFFFF) != (clus & 0x0FFFFFFF)) {
		dfr_err("FAT: inode %p, next_clus %08x, read_clus %08x", inode, chunk->next_clus, clus);
		err = -EIO;
	}

error:
	BUG_ON(err);
}


/**
 * @fn		__defrag_update_dirent
 * @brief	update DIR entry for defrag req
 * @return	void
 * @param	sb	super block
 * @param	chunk	given chunk
 */
static void
__defrag_update_dirent(
	struct super_block *sb,
	struct defrag_chunk_info *chunk)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
	CHAIN_T dir;
	DOS_DENTRY_T *dos_ep;
	unsigned int entry = 0, sector = 0;
	unsigned short hi = 0, lo = 0;
	int err = 0;

	dir.dir = GET64_HI(chunk->i_pos);
	dir.flags = 0x1;	/* Assume non-continuous */

	entry = GET64_LO(chunk->i_pos);

	FAT32_CHECK_CLUSTER(fsi, dir.dir, err);
	BUG_ON(err);
	dos_ep = (DOS_DENTRY_T *) get_dentry_in_dir(sb, &dir, entry, &sector);

	hi = GET32_HI(sbi->dfr_new_clus[chunk->new_idx]);
	lo = GET32_LO(sbi->dfr_new_clus[chunk->new_idx]);

	dos_ep->start_clu_hi = cpu_to_le16(hi);
	dos_ep->start_clu_lo = cpu_to_le16(lo);

	dcache_modify(sb, sector);
}
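
/*
 * Example of the start-cluster split above: a FAT32 directory entry
 * stores its 32-bit start cluster as two little-endian 16-bit halves,
 * so for a new start cluster 0x00045678 the code writes
 * start_clu_hi = cpu_to_le16(0x0004) and
 * start_clu_lo = cpu_to_le16(0x5678).
 */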


/**
 * @fn		defrag_update_fat_prev
 * @brief	update FAT chain for defrag requests
 * @return	void
 * @param	sb	super block
 * @param	force	flag to force FAT update
 * @remark	protected by super_block and volume lock
 */
void
defrag_update_fat_prev(
	struct super_block *sb,
	int force)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &(sbi->fsi);
	struct defrag_info *sb_dfr = &sbi->dfr_info, *ino_dfr = NULL;
	int skip = 0, done = 0;

	/* Check if FS_ERROR occurred */
	if (sb->s_flags & MS_RDONLY) {
		dfr_err("RDONLY partition (err %d)", -EROFS);
		goto out;
	}

	list_for_each_entry(ino_dfr, &sb_dfr->entry, entry) {
		struct inode *inode = &(container_of(ino_dfr, struct sdfat_inode_info, dfr_info)->vfs_inode);
		struct sdfat_inode_info *ino_info = SDFAT_I(inode);
		struct defrag_chunk_info *chunk_prev = NULL;
		int i = 0, j = 0;

		mutex_lock(&ino_dfr->lock);
		BUG_ON(atomic_read(&ino_dfr->stat) != DFR_INO_STAT_REQ);
		for (i = 0; i < ino_dfr->nr_chunks; i++) {
			struct defrag_chunk_info *chunk = NULL;
			int err = 0;

			chunk = &(ino_dfr->chunks[i]);
			BUG_ON(!chunk);

			/* Do nothing for already passed chunk */
			if (chunk->stat == DFR_CHUNK_STAT_PASS) {
				done++;
				continue;
			}

			/* Handle error case */
			if (chunk->stat == DFR_CHUNK_STAT_ERR) {
				err = -EINVAL;
				goto error;
			}

			/* Double-check clusters */
			if (chunk_prev &&
				(chunk->f_clus == chunk_prev->f_clus + chunk_prev->nr_clus) &&
				(chunk_prev->stat == DFR_CHUNK_STAT_PASS)) {

				err = defrag_validate_cluster(inode, chunk, 1);

				/* Handle continuous chunks in a file */
				if (!err) {
					chunk->prev_clus =
						sbi->dfr_new_clus[chunk_prev->new_idx + chunk_prev->nr_clus - 1];
					dfr_debug("prev->f_clus %d, prev->nr_clus %d, chunk->f_clus %d",
							chunk_prev->f_clus, chunk_prev->nr_clus, chunk->f_clus);
				}
			} else {
				err = defrag_validate_cluster(inode, chunk, 0);
			}

			if (err) {
				dfr_err("Cluster validation: inode %p, chunk->f_clus %d, err %d",
						inode, chunk->f_clus, err);
				goto error;
			}

			/**
			 * Skip update_fat_prev if WB or update_fat_next not completed.
			 * Go to error case if FORCE set.
			 */
			if (__defrag_check_wb(sbi, chunk) || (chunk->stat != DFR_CHUNK_STAT_PREP)) {
				if (force) {
					err = -EPERM;
					dfr_err("Skip case: inode %p, stat %x, f_clus %d, err %d",
							inode, chunk->stat, chunk->f_clus, err);
					goto error;
				}
				skip++;
				continue;
			}

#ifdef CONFIG_SDFAT_DFR_DEBUG
			/* SPO test */
			defrag_spo_test(sb, DFR_SPO_RANDOM, __func__);
#endif

			/* Update chunk's previous cluster */
			if (chunk->prev_clus == 0) {
				/* For the first cluster of a file,
				 * update ino_info->fid.start_clu as well */
				ino_info->fid.start_clu = sbi->dfr_new_clus[chunk->new_idx];
				__defrag_update_dirent(sb, chunk);
			} else {
				FAT32_CHECK_CLUSTER(fsi, chunk->prev_clus, err);
				BUG_ON(err);
				if (fat_ent_set(sb,
						chunk->prev_clus,
						sbi->dfr_new_clus[chunk->new_idx])) {
					err = -EIO;
					goto error;
				}
			}

			/* Clear extent cache */
			extent_cache_inval_inode(inode);

			/* Update FID info */
			ino_info->fid.hint_bmap.off = -1;
			ino_info->fid.hint_bmap.clu = 0;

			/* Clear old FAT-chain */
			for (j = 0; j < chunk->nr_clus; j++)
				defrag_free_cluster(sb, chunk->d_clus + j);

			/* Mark this chunk PASS */
			chunk->stat = DFR_CHUNK_STAT_PASS;
			__defrag_check_fat_new(sb, inode, chunk);

			done++;

error:
			if (err) {
				/**
				 * chunk->new_idx != 0 means this chunk needs to be cleaned up
				 */
				if (chunk->new_idx) {
					/* Free already allocated clusters */
					for (j = 0; j < chunk->nr_clus; j++) {
						if (sbi->dfr_new_clus[chunk->new_idx + j]) {
							defrag_free_cluster(sb, sbi->dfr_new_clus[chunk->new_idx + j]);
							sbi->dfr_new_clus[chunk->new_idx + j] = 0;
						}
					}

					__defrag_check_fat_old(sb, inode, chunk);
				}

				/**
				 * chunk->new_idx == 0 means this chunk is already cleaned up
				 */
				chunk->new_idx = 0;
				chunk->stat = DFR_CHUNK_STAT_ERR;
			}

			chunk_prev = chunk;
		}
		BUG_ON(!mutex_is_locked(&ino_dfr->lock));
		mutex_unlock(&ino_dfr->lock);
	}

out:
	if (skip) {
		dfr_debug("%s skipped (nr_reqs %d, done %d, skip %d)",
				__func__, sb_dfr->nr_chunks - 1, done, skip);
	} else {
		/* Make dfr_reserved_clus zero */
		if (sbi->dfr_reserved_clus > 0) {
			if (fsi->reserved_clusters < sbi->dfr_reserved_clus) {
				dfr_err("Reserved count: reserved_clus %d, dfr_reserved_clus %d",
						fsi->reserved_clusters, sbi->dfr_reserved_clus);
				BUG_ON(fsi->reserved_clusters < sbi->dfr_reserved_clus);
			}

			defrag_reserve_clusters(sb, 0 - sbi->dfr_reserved_clus);
		}

		dfr_debug("%s done (nr_reqs %d, done %d)", __func__, sb_dfr->nr_chunks - 1, done);
	}
}


/**
 * @fn		defrag_update_fat_next
 * @brief	update FAT chain for defrag requests
 * @return	void
 * @param	sb	super block
 * @remark	protected by super_block and volume lock
 */
void
defrag_update_fat_next(
	struct super_block *sb)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	struct defrag_info *sb_dfr = &sbi->dfr_info, *ino_dfr = NULL;
	struct defrag_chunk_info *chunk = NULL;
	int done = 0, i = 0, j = 0, err = 0;

	/* Check if FS_ERROR occurred */
	if (sb->s_flags & MS_RDONLY) {
		dfr_err("RDONLY partition (err %d)", -EROFS);
		goto out;
	}

	list_for_each_entry(ino_dfr, &sb_dfr->entry, entry) {

		for (i = 0; i < ino_dfr->nr_chunks; i++) {
			int skip = 0;

			chunk = &(ino_dfr->chunks[i]);

			/* Do nothing if error occurred or update_fat_next already passed */
			if (chunk->stat == DFR_CHUNK_STAT_ERR)
				continue;
			if (chunk->stat & DFR_CHUNK_STAT_FAT) {
				done++;
				continue;
			}

			/* Skip this chunk if get_block has not passed for it */
			for (j = 0; j < chunk->nr_clus; j++) {
				if (sbi->dfr_new_clus[chunk->new_idx + j] == 0) {
					skip = 1;
					break;
				}
			}
			if (skip)
				continue;

			/* Update chunk's next cluster */
			FAT32_CHECK_CLUSTER(fsi,
				sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], err);
			BUG_ON(err);
			if (fat_ent_set(sb,
					sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1],
					chunk->next_clus))
				goto out;

#ifdef CONFIG_SDFAT_DFR_DEBUG
			/* SPO test */
			defrag_spo_test(sb, DFR_SPO_RANDOM, __func__);
#endif

			/* Update chunk's state */
			chunk->stat |= DFR_CHUNK_STAT_FAT;
			done++;
		}
	}

out:
	dfr_debug("%s done (nr_reqs %d, done %d)", __func__, sb_dfr->nr_chunks - 1, done);
}


/**
 * @fn		defrag_check_discard
 * @brief	check if we can send discard for this AU, if so, send discard
 * @return	void
 * @param	sb	super block
 * @remark	protected by super_block and volume lock
 */
void
defrag_check_discard(
	IN struct super_block *sb)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
	AU_INFO_T *au = NULL;
	struct defrag_info *sb_dfr = &(SDFAT_SB(sb)->dfr_info);
	unsigned int tmp[DFR_MAX_AU_MOVED];
	int i = 0, j = 0;

	BUG_ON(!amap);

	if (!(SDFAT_SB(sb)->options.discard) ||
		!(SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART))
		return;

	memset(tmp, 0, sizeof(tmp));

	for (i = REQ_HEADER_IDX + 1; i < sb_dfr->nr_chunks; i++) {
		struct defrag_chunk_info *chunk = &(sb_dfr->chunks[i]);
		int skip = 0;

		au = GET_AU(amap, i_AU_of_CLU(amap, chunk->d_clus));

		/* Send DISCARD for free AU */
		if ((IS_AU_IGNORED(au, amap)) &&
			(amap_get_freeclus(sb, chunk->d_clus) == CLUS_PER_AU(sb))) {
			sector_t blk = 0, nr_blks = 0;
			unsigned int au_align_factor = amap->option.au_align_factor % amap->option.au_size;

			BUG_ON(au->idx == 0);

			/* Avoid multiple DISCARD */
			for (j = 0; j < DFR_MAX_AU_MOVED; j++) {
				if (tmp[j] == au->idx) {
					skip = 1;
					break;
				}
			}
			if (skip == 1)
				continue;

			/* Send DISCARD cmd */
			blk = (sector_t) (((au->idx * CLUS_PER_AU(sb)) << fsi->sect_per_clus_bits)
					- au_align_factor);
			nr_blks = ((sector_t)CLUS_PER_AU(sb)) << fsi->sect_per_clus_bits;

			dfr_debug("Send DISCARD for AU[%d] (blk %08llx)",
					au->idx, (unsigned long long)blk);
			sb_issue_discard(sb, blk, nr_blks, GFP_NOFS, 0);

			/* Save previous AU's index */
			for (j = 0; j < DFR_MAX_AU_MOVED; j++) {
				if (!tmp[j]) {
					tmp[j] = au->idx;
					break;
				}
			}
		}
	}
}
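
/*
 * Example of the discard-range arithmetic above (illustrative numbers):
 * with CLUS_PER_AU(sb) == 64, sect_per_clus_bits == 3 and an alignment
 * remainder au_align_factor == 16, AU index 10 maps to
 * blk = ((10 * 64) << 3) - 16 == 5104 and nr_blks = 64 << 3 == 512,
 * i.e. exactly one AU worth of sectors shifted back by the partition's
 * alignment offset.
 */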


/**
 * @fn		defrag_free_cluster
 * @brief	free unnecessary cluster
 * @return	0 on success, -errno otherwise
 * @param	sb	super block
 * @param	clus	physical cluster num
 * @remark	protected by super_block and volume lock
 */
int
defrag_free_cluster(
	struct super_block *sb,
	unsigned int clus)
{
	FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
	unsigned int val = 0;
	s32 err = 0;

	FAT32_CHECK_CLUSTER(fsi, clus, err);
	BUG_ON(err);
	if (fat_ent_get(sb, clus, &val))
		return -EIO;
	if (val) {
		if (fat_ent_set(sb, clus, 0))
			return -EIO;
	} else {
		dfr_err("Free: Already freed, clus %08x, val %08x", clus, val);
		BUG_ON(!val);
	}

	set_sb_dirty(sb);
	fsi->used_clusters--;
	if (fsi->amap)
		amap_release_cluster(sb, clus);

	return 0;
}


/**
 * @fn		defrag_check_defrag_required
 * @brief	check if defrag required
 * @return	1 if required, 0 otherwise, -errno on error
 * @param	sb	super block
 * @param	totalau	# of total AUs
 * @param	cleanau	# of clean AUs
 * @param	fullau	# of full AUs
 * @remark	protected by super_block
 */
int
defrag_check_defrag_required(
	IN struct super_block *sb,
	OUT int *totalau,
	OUT int *cleanau,
	OUT int *fullau)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	AMAP_T *amap = NULL;
	int clean_ratio = 0, frag_ratio = 0;
	int ret = 0;

	if (!sb || !(SDFAT_SB(sb)->options.defrag))
		return 0;

	/* Check DFR_DEFAULT_STOP_RATIO first */
	fsi = &(SDFAT_SB(sb)->fsi);
	if (fsi->used_clusters == (unsigned int)(~0)) {
		if (fsi->fs_func->count_used_clusters(sb, &fsi->used_clusters))
			return -EIO;
	}
	if (fsi->used_clusters * DFR_FULL_RATIO >= fsi->num_clusters * DFR_DEFAULT_STOP_RATIO) {
		dfr_debug("used_clusters %d, num_clusters %d", fsi->used_clusters, fsi->num_clusters);
		return 0;
	}

	/* Check clean/frag ratio */
	amap = SDFAT_SB(sb)->fsi.amap;
	BUG_ON(!amap);

	clean_ratio = (amap->n_clean_au * 100) / amap->n_au;
	if (amap->n_full_au)
		frag_ratio = ((amap->n_au - amap->n_clean_au) * 100) / amap->n_full_au;
	else
		frag_ratio = ((amap->n_au - amap->n_clean_au) * 100) /
				(fsi->used_clusters * CLUS_PER_AU(sb));

	/*
	 * Wake-up defrag_daemon:
	 * when # of clean AUs too small, or frag_ratio exceeds the limit
	 */
	if ((clean_ratio < DFR_DEFAULT_WAKEUP_RATIO) ||
		((clean_ratio < DFR_DEFAULT_CLEAN_RATIO) && (frag_ratio >= DFR_DEFAULT_FRAG_RATIO))) {

		if (totalau)
			*totalau = amap->n_au;
		if (cleanau)
			*cleanau = amap->n_clean_au;
		if (fullau)
			*fullau = amap->n_full_au;
		ret = 1;
	}

	return ret;
}
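
/*
 * Worked example of the wake-up test above (threshold values are
 * illustrative; the real DFR_DEFAULT_* constants live in the dfr
 * header): with n_au == 1000, n_clean_au == 60 and n_full_au == 400,
 * clean_ratio == 6 and frag_ratio == (1000 - 60) * 100 / 400 == 235.
 * If DFR_DEFAULT_WAKEUP_RATIO were 5, DFR_DEFAULT_CLEAN_RATIO 10 and
 * DFR_DEFAULT_FRAG_RATIO 130, the daemon would be woken via the second
 * clause, since 6 < 10 and 235 >= 130.
 */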


/**
 * @fn		defrag_check_defrag_on
 * @brief	check defrag status on inode
 * @return	1 if defrag is on, 0 otherwise
 * @param	inode	inode
 * @param	start	logical start addr
 * @param	end	logical end addr
 * @param	cancel	flag to cancel defrag
 * @param	caller	caller info
 */
int
defrag_check_defrag_on(
	INOUT struct inode *inode,
	IN loff_t start,
	IN loff_t end,
	IN int cancel,
	IN const char *caller)
{
	struct super_block *sb = inode->i_sb;
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &(sbi->fsi);
	struct defrag_info *ino_dfr = &(SDFAT_I(inode)->dfr_info);
	unsigned int clus_start = 0, clus_end = 0;
	int ret = 0, i = 0;

	if (!inode || (start == end))
		return 0;

	mutex_lock(&ino_dfr->lock);
	/* Check if this inode is on defrag */
	if (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ) {

		clus_start = start >> (fsi->cluster_size_bits);
		clus_end = (end >> (fsi->cluster_size_bits)) +
			((end & (fsi->cluster_size - 1)) ? 1 : 0);

		if (!ino_dfr->chunks)
			goto error;

		/* Check each chunk in given inode */
		for (i = 0; i < ino_dfr->nr_chunks; i++) {
			struct defrag_chunk_info *chunk = &(ino_dfr->chunks[i]);
			unsigned int chunk_start = 0, chunk_end = 0;

			/* Skip this chunk when error occurred or it already passed defrag process */
			if ((chunk->stat == DFR_CHUNK_STAT_ERR) || (chunk->stat == DFR_CHUNK_STAT_PASS))
				continue;

			chunk_start = chunk->f_clus;
			chunk_end = chunk->f_clus + chunk->nr_clus;

			if (((clus_start >= chunk_start) && (clus_start < chunk_end)) ||
				((clus_end > chunk_start) && (clus_end <= chunk_end)) ||
				((clus_start < chunk_start) && (clus_end > chunk_end))) {
				ret = 1;
				if (cancel) {
					chunk->stat = DFR_CHUNK_STAT_ERR;
					dfr_debug("Defrag canceled: inode %p, start %08x, end %08x, caller %s",
							inode, clus_start, clus_end, caller);
				}
			}
		}
	}

error:
	BUG_ON(!mutex_is_locked(&ino_dfr->lock));
	mutex_unlock(&ino_dfr->lock);
	return ret;
}


#ifdef CONFIG_SDFAT_DFR_DEBUG
/**
 * @fn		defrag_spo_test
 * @brief	test SPO while defrag running
 * @return	void
 * @param	sb	super block
 * @param	flag	SPO debug flag
 * @param	caller	caller info
 */
void
defrag_spo_test(
	struct super_block *sb,
	int flag,
	const char *caller)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);

	if (!sb || !(SDFAT_SB(sb)->options.defrag))
		return;

	if (flag == sbi->dfr_spo_flag) {
		dfr_err("Defrag SPO test (flag %d, caller %s)", flag, caller);
		panic("Defrag SPO test");
	}
}
#endif	/* CONFIG_SDFAT_DFR_DEBUG */


#endif	/* CONFIG_SDFAT_DFR */