2 * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
20 /************************************************************************/
22 /* PROJECT : exFAT & FAT12/16/32 File System */
23 /* FILE : amap_smart.c */
24 /* PURPOSE : FAT32 Smart allocation code for sdFAT */
26 /*----------------------------------------------------------------------*/
30 /************************************************************************/
32 #include <linux/slab.h>
33 #include <linux/vmalloc.h>
37 #include "amap_smart.h"
39 /* AU list related functions */
/* NOTE(review): this extract is garbled — the embedded original line numbers
 * show gaps (e.g. 42->44, 55->63), so braces/returns are missing. Code below
 * is kept byte-identical; only comments were added. */
/* Unlink a doubly-linked list entry (kernel-style __list_del on prev/next). */
40 static inline void amap_list_del(struct list_head
*entry
)
42 __list_del(entry
->prev
, entry
->next
);
44 /* Will be used to check if the entry is a single entry(selected) */
/* Insert an AU's singly-linked shead into the given slist after its head.
 * NOTE(review): only part of the insertion is visible here. */
49 static inline int amap_insert_to_list(AU_INFO_T
*au
, struct slist_head
*shead
)
51 struct slist_head
*entry
= &au
->shead
;
55 entry
->next
= shead
->next
;
/* Remove an AU's shead from the singly-linked list by walking from the list
 * head until the predecessor of 'entry' is found, then splicing it out.
 * BUG_ON fires if the entry does not belong to this list. */
63 static inline int amap_remove_from_list(AU_INFO_T
*au
, struct slist_head
*shead
)
65 struct slist_head
*entry
= &au
->shead
;
66 struct slist_head
*iter
;
68 BUG_ON(entry
->head
!= shead
);
73 if (iter
->next
== entry
) {
74 // iter->next = iter->next->next
75 iter
->next
= entry
->next
;
/* Reached only if the entry was not found in the list (corruption). */
84 BUG_ON("Not reachable");
/* NOTE(review): garbled extract — loop bodies are partially missing; code kept
 * byte-identical, comments only. */
87 /* Full-linear search => Find AU with max. number of fclu */
/* Walks the singly-linked hot list and returns the AU with the most free
 * clusters (ret stays NULL if none qualifies). */
88 static inline AU_INFO_T
* amap_find_hot_au_largest(struct slist_head
*shead
)
90 struct slist_head
*iter
;
91 uint16_t max_fclu
= 0;
92 AU_INFO_T
*entry
, *ret
= NULL
;
94 ASSERT(shead
->head
== shead
); /* Singly-list condition */
95 ASSERT(shead
->next
!= shead
);
100 entry
= list_entry(iter
, AU_INFO_T
, shead
);
102 if (entry
->free_clusters
> max_fclu
) {
103 max_fclu
= entry
->free_clusters
;
113 /* Find partially used AU with max. number of fclu.
114 If there is no partial AU available, pick a clean one */
/* Same linear scan over amap->slist_hot, but prefers AUs that are only
 * partially used (free_clusters < clusters_per_au). */
115 static inline AU_INFO_T
* amap_find_hot_au_partial(AMAP_T
*amap
)
117 struct slist_head
*iter
;
118 uint16_t max_fclu
= 0;
119 AU_INFO_T
*entry
, *ret
= NULL
;
121 iter
= &amap
->slist_hot
;
122 ASSERT(iter
->head
== iter
); /* Singly-list condition */
123 ASSERT(iter
->next
!= iter
);
128 entry
= list_entry(iter
, AU_INFO_T
, shead
);
130 if (entry
->free_clusters
> max_fclu
) {
131 if (entry
->free_clusters
< amap
->clusters_per_au
) {
132 max_fclu
= entry
->free_clusters
;
/* NOTE(review): garbled extract; code kept byte-identical, comments only. */
150 Size-base AU management functions
154 Add au into cold AU MAP
155 au: an isolated (not in a list) AU data structure
/* Insert an isolated AU into the cold map: hash bucket is indexed by the AU's
 * free-cluster count (NODE(free_clusters, amap)); full AUs are skipped.
 * Also raises fclu_hint if this AU has more free clusters than the hint. */
157 int amap_add_cold_au(AMAP_T
*amap
, AU_INFO_T
*au
)
159 FCLU_NODE_T
*fclu_node
= NULL
;
161 /* Check if a single entry */
162 BUG_ON(au
->head
.prev
);
164 /* Ignore if the au is full */
165 if (!au
->free_clusters
)
169 fclu_node
= NODE(au
->free_clusters
, amap
);
171 /* Insert to the list */
172 list_add_tail(&(au
->head
), &(fclu_node
->head
));
174 /* Update fclu_hint (Increase) */
175 if (au
->free_clusters
> amap
->fclu_hint
)
176 amap
->fclu_hint
= au
->free_clusters
;
182 Remove an AU from AU MAP
/* Detach an AU from its cold-map bucket list.
 * NOTE(review): the visible BUG_ON(au->free_clusters > 0) contradicts the
 * remove-on-nonzero usage elsewhere — surrounding lines are missing, so the
 * real condition cannot be confirmed from this extract. */
184 int amap_remove_cold_au(AMAP_T
*amap
, AU_INFO_T
* au
)
186 struct list_head
*prev
= au
->head
.prev
;
188 /* Single entries are not managed in lists */
190 BUG_ON(au
->free_clusters
> 0);
194 /* remove from list */
195 amap_list_del(&(au
->head
));
/* NOTE(review): garbled extract; code kept byte-identical, comments only. */
201 /* "Find" best fit AU
202 returns NULL if there is no AU w/ enough free space.
204 This function doesn't change AU status.
205 The caller should call amap_remove_cold_au() if needed.
/* Best-fit lookup: starting at the bucket for 'free_clusters', scan upward
 * through the per-free-count bucket array until a non-empty bucket is found.
 * Uses fclu_hint as an early-out when no AU can satisfy the request. */
207 AU_INFO_T
* amap_find_cold_au_bestfit(AMAP_T
*amap
, uint16_t free_clusters
)
209 AU_INFO_T
*au
= NULL
;
210 FCLU_NODE_T
*fclu_iter
;
212 if (free_clusters
<= 0 || free_clusters
> amap
->clusters_per_au
) {
213 EMSG("AMAP: amap_find_cold_au_bestfit / unexpected arg. (%d)\n",
218 fclu_iter
= NODE(free_clusters
, amap
);
220 if (amap
->fclu_hint
< free_clusters
) {
221 /* There is no AUs with enough free_clusters */
225 /* Naive Hash management (++) */
227 if (!list_empty(&fclu_iter
->head
)) {
228 struct list_head
*first
= fclu_iter
->head
.next
;
230 au
= list_entry(first
, AU_INFO_T
, head
);
236 } while (fclu_iter
< (amap
->fclu_nodes
+ amap
->clusters_per_au
));
239 // BUG_ON(au->free_clusters < 0);
240 BUG_ON(au
&& (au
->free_clusters
> amap
->clusters_per_au
));
247 returns NULL if there is no AU w/ enough free space.
248 The returned AU will not be in the list anymore.
/* Pop variant: best-fit find followed by removal from the cold map. */
250 AU_INFO_T
* amap_pop_cold_au_bestfit(AMAP_T
*amap
, uint16_t free_clusters
)
252 /* Naive implementation */
255 au
= amap_find_cold_au_bestfit(amap
, free_clusters
);
257 amap_remove_cold_au(amap
, au
);
/* NOTE(review): garbled extract; code kept byte-identical, comments only. */
264 /* Pop the AU with the largest free space
266 search from 'start_fclu' to 0
267 (target freecluster : -1 for each step)
269 start_fclu = 0 means to search from the max. value
/* Downward bucket scan: start at min(start_fclu, fclu_hint) and walk toward
 * smaller free-cluster counts; the first non-empty bucket yields the AU,
 * which is unlinked. fclu_hint is refreshed only on a full-range search. */
271 AU_INFO_T
* amap_pop_cold_au_largest(AMAP_T
*amap
, uint16_t start_fclu
)
273 AU_INFO_T
* au
= NULL
;
274 FCLU_NODE_T
*fclu_iter
;
277 start_fclu
= amap
->clusters_per_au
;
278 if (start_fclu
> amap
->clusters_per_au
)
279 start_fclu
= amap
->clusters_per_au
;
281 /* Use hint (search start point) */
282 if (amap
->fclu_hint
< start_fclu
)
283 fclu_iter
= NODE(amap
->fclu_hint
, amap
);
285 fclu_iter
= NODE(start_fclu
, amap
);
287 /* Naive Hash management */
289 if (!list_empty(&fclu_iter
->head
)) {
290 struct list_head
*first
= fclu_iter
->head
.next
;
292 au
= list_entry(first
, AU_INFO_T
, head
);
293 // BUG_ON((au < amap->entries) || ((amap->entries + amap->n_au) <= au));
295 amap_list_del(first
);
297 // (Hint) Possible maximum value of free clusters (among cold)
298 /* if it wasn't the whole search, don't update fclu_hint */
299 if (start_fclu
== amap
->clusters_per_au
)
300 amap
->fclu_hint
= au
->free_clusters
;
306 } while (amap
->fclu_nodes
<= fclu_iter
);
/* NOTE(review): heavily garbled extract — many original lines are missing
 * (error-return paths, loop braces, cleanup labels). Code kept byte-identical,
 * comments only. */
314 ===============================================
315 Allocation Map related functions
316 ===============================================
319 /* Create AMAP related data structure (mount time) */
/* Mount-time constructor: validates FAT32/geometry constraints, allocates the
 * AMAP_T, the AU info table pages, and the free-cluster bucket nodes, then
 * parses the FAT to populate per-AU free-cluster counts and builds the
 * hot/cold lists. The tail (from "546" on) is the error-cleanup path. */
320 int amap_create(struct super_block
*sb
, u32 pack_ratio
, u32 sect_per_au
, u32 hidden_sect
)
322 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
324 int total_used_clusters
;
327 int i_au_root
= -1, i_au_hot_from
= INT_MAX
;
328 u32 misaligned_sect
= hidden_sect
;
330 BUG_ON(!fsi
->bd_opened
);
335 /* Check conditions */
336 if (fsi
->vol_type
!= FAT32
) {
337 sdfat_msg(sb
, KERN_ERR
, "smart allocation is only available "
342 if (fsi
->num_sectors
< AMAP_MIN_SUPPORT_SECTORS
) {
343 sdfat_msg(sb
, KERN_ERR
, "smart allocation is only available "
344 "with sectors above %d", AMAP_MIN_SUPPORT_SECTORS
);
348 /* AU size must be a multiple of clu_size */
349 if ((sect_per_au
<= 0) || (sect_per_au
& (fsi
->sect_per_clus
- 1))) {
350 sdfat_msg(sb
, KERN_ERR
,
351 "invalid AU size (sect_per_au : %u, "
352 "sect_per_clus : %u) "
353 "please re-format for performance.",
354 sect_per_au
, fsi
->sect_per_clus
);
358 /* the start sector of this partition must be a multiple of clu_size */
359 if (misaligned_sect
& (fsi
->sect_per_clus
- 1)) {
360 sdfat_msg(sb
, KERN_ERR
,
361 "misaligned part (start sect : %u, "
362 "sect_per_clus : %u) "
363 "please re-format for performance.",
364 misaligned_sect
, fsi
->sect_per_clus
);
368 /* data start sector must be a multiple of clu_size */
369 if (fsi
->data_start_sector
& (fsi
->sect_per_clus
- 1)) {
370 sdfat_msg(sb
, KERN_ERR
,
371 "misaligned data area (start sect : %u, "
372 "sect_per_clus : %u) "
373 "please re-format for performance.",
374 fsi
->data_start_sector
, fsi
->sect_per_clus
);
378 misaligned_sect
&= (sect_per_au
- 1);
380 /* Allocate data structures */
381 amap
= kzalloc(sizeof(AMAP_T
), GFP_NOIO
);
/* n_au: ceil((num_sectors + misalignment) / sect_per_au) */
387 amap
->n_au
= (fsi
->num_sectors
+ misaligned_sect
+ sect_per_au
- 1) / sect_per_au
;
388 amap
->n_clean_au
= 0;
391 /* Reflect block-partition align first,
392 then partition-data_start align */
393 amap
->clu_align_bias
= (misaligned_sect
/ fsi
->sect_per_clus
);
394 amap
->clu_align_bias
+= (fsi
->data_start_sector
>> fsi
->sect_per_clus_bits
) - CLUS_BASE
;
395 amap
->clusters_per_au
= sect_per_au
/ fsi
->sect_per_clus
;
398 * the size of cluster is at least 4KB if the size of AU is 4MB
400 if (amap
->clusters_per_au
> MAX_CLU_PER_AU
) {
401 sdfat_log_msg(sb
, KERN_INFO
,
402 "too many clusters per AU (clus/au:%d > %d).",
403 amap
->clusters_per_au
,
407 /* is it needed? why here? */
410 spin_lock_init(&amap
->amap_lock
);
412 amap
->option
.packing_ratio
= pack_ratio
;
413 amap
->option
.au_size
= sect_per_au
;
414 amap
->option
.au_align_factor
= hidden_sect
;
417 /* Allocate AU info table */
418 n_au_table
= (amap
->n_au
+ N_AU_PER_TABLE
- 1) / N_AU_PER_TABLE
;
419 amap
->au_table
= kmalloc(sizeof(AU_INFO_T
*) * n_au_table
, GFP_NOIO
);
421 if (!amap
->au_table
) {
422 sdfat_msg(sb
, KERN_ERR
,
423 "failed to alloc amap->au_table\n");
428 for (i
= 0; i
< n_au_table
; i
++)
429 amap
->au_table
[i
] = (AU_INFO_T
*)get_zeroed_page(GFP_NOIO
);
431 /* Allocate buckets indexed by # of free clusters */
432 amap
->fclu_order
= get_order(sizeof(FCLU_NODE_T
) * amap
->clusters_per_au
);
434 // XXX: amap->clusters_per_au limitation is 512 (w/ 8 byte list_head)
435 sdfat_log_msg(sb
, KERN_INFO
, "page orders for AU nodes : %d "
436 "(clus_per_au : %d, node_size : %lu)",
438 amap
->clusters_per_au
,
439 (unsigned long)sizeof(FCLU_NODE_T
));
/* Single page if the node array fits; otherwise vzalloc multiple pages. */
441 if (!amap
->fclu_order
)
442 amap
->fclu_nodes
= (FCLU_NODE_T
*)get_zeroed_page(GFP_NOIO
);
444 amap
->fclu_nodes
= (FCLU_NODE_T
*)vzalloc(PAGE_SIZE
<< amap
->fclu_order
);
446 amap
->fclu_hint
= amap
->clusters_per_au
;
448 /* Hot AU list, ignored AU list */
449 amap
->slist_hot
.next
= NULL
;
450 amap
->slist_hot
.head
= &amap
->slist_hot
;
451 amap
->total_fclu_hot
= 0;
453 amap
->slist_ignored
.next
= NULL
;
454 amap
->slist_ignored
.head
= &amap
->slist_ignored
;
456 /* Strategy related vars. */
457 amap
->cur_cold
.au
= NULL
;
458 amap
->cur_hot
.au
= NULL
;
459 amap
->n_need_packing
= 0;
462 /* Build AMAP info */
463 total_used_clusters
= 0; // Count # of used clusters
465 i_au_root
= i_AU_of_CLU(amap
, fsi
->root_dir
);
/* The last SMART_ALLOC_N_HOT_AU AUs are reserved as "hot" (directory) AUs. */
466 i_au_hot_from
= amap
->n_au
- (SMART_ALLOC_N_HOT_AU
- 1);
468 for (i
= 0; i
< amap
->clusters_per_au
; i
++)
469 INIT_LIST_HEAD(&amap
->fclu_nodes
[i
].head
);
474 amap->entries[i_au].free_clusters = 0;
475 amap->entries[i_au].head.prev = NULL;
476 amap->entries[i_au].head.next = NULL;
479 /* Parse FAT table */
480 for (i_clu
= CLUS_BASE
; i_clu
< fsi
->num_clusters
; i_clu
++){
484 if (fat_ent_get(sb
, i_clu
, &clu_data
)) {
485 sdfat_msg(sb
, KERN_ERR
,
486 "failed to read fat entry(%u)\n", i_clu
);
490 if (IS_CLUS_FREE(clu_data
)) {
491 au
= GET_AU(amap
, i_AU_of_CLU(amap
, i_clu
));
494 total_used_clusters
++;
498 for (i_au
= 0; i_au
< amap
->n_au
; i_au
++){
499 AU_INFO_T
*au
= GET_AU(amap
, i_au
);
502 BUG_ON(au
->free_clusters
> amap
->clusters_per_au
);
504 if (au
->free_clusters
== amap
->clusters_per_au
)
506 else if (au
->free_clusters
== 0)
509 /* If hot, insert to the hot list */
510 if (i_au
>= i_au_hot_from
) {
511 amap_add_hot_au(amap
, au
);
512 amap
->total_fclu_hot
+= au
->free_clusters
;
513 } else if (i_au
!= i_au_root
|| SMART_ALLOC_N_HOT_AU
== 0) {
514 /* Otherwise, insert to the free cluster hash */
515 amap_add_cold_au(amap
, au
);
519 /* Hot list -> (root) -> (last) -> (last - 1) -> ... */
520 if (i_au_root
>= 0 && SMART_ALLOC_N_HOT_AU
> 0) {
521 amap_add_hot_au(amap
, GET_AU(amap
, i_au_root
));
522 amap
->total_fclu_hot
+= GET_AU(amap
, i_au_root
)->free_clusters
;
527 fsi
->used_clusters
= total_used_clusters
;
529 sdfat_msg(sb
, KERN_INFO
,
530 "AMAP: Smart allocation enabled (opt : %u / %u / %u)",
531 amap
->option
.au_size
, amap
->option
.au_align_factor
,
532 amap
->option
.packing_ratio
);
534 /* Debug purpose - check */
537 //fat_count_used_clusters(sb, &used_clusters)
538 //ASSERT(used_clusters == total_used_clusters);
/* Error path: undo partial allocations (au_table pages, bucket nodes). */
546 if (amap
->au_table
) {
547 for (i
= 0; i
< n_au_table
; i
++)
548 free_page((unsigned long)amap
->au_table
[i
]);
549 kfree(amap
->au_table
);
551 if (amap
->fclu_nodes
) {
552 if (!amap
->fclu_order
)
553 free_page((unsigned long)amap
->fclu_nodes
);
555 vfree(amap
->fclu_nodes
);
/* NOTE(review): garbled extract; code kept byte-identical, comments only. */
563 /* Free AMAP related structure */
/* Unmount-time destructor: frees the AU table pages, the bucket-node page(s)
 * (free_page vs vfree depending on fclu_order), and clears fsi.amap. */
564 void amap_destroy(struct super_block
*sb
)
566 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
572 DMSG("%s\n", __func__
);
574 n_au_table
= (amap
->n_au
+ N_AU_PER_TABLE
- 1) / N_AU_PER_TABLE
;
576 if (amap
->au_table
) {
578 for (i
= 0; i
< n_au_table
; i
++)
579 free_page((unsigned long)amap
->au_table
[i
]);
581 kfree(amap
->au_table
);
583 if (!amap
->fclu_order
)
584 free_page((unsigned long)amap
->fclu_nodes
);
586 vfree(amap
->fclu_nodes
);
588 SDFAT_SB(sb
)->fsi
.amap
= NULL
;
/* NOTE(review): garbled extract; code kept byte-identical, comments only. */
596 and change destination if needed to disable AU-aligned alloc.
597 (from ALLOC_COLD_ALIGNED to ALLOC_COLD_SEQ)
/* Heuristic downgrade: when clean AUs are scarce and the remaining partial
 * AUs are badly fragmented, fall back from aligned to sequential allocation.
 * Only ALLOC_COLD_ALIGNED requests are ever changed. */
599 static inline int amap_update_dest(AMAP_T
*amap
, int ori_dest
)
601 FS_INFO_T
*fsi
= &(SDFAT_SB(amap
->sb
)->fsi
);
602 int n_partial_au
, n_partial_freeclus
;
604 if (ori_dest
!= ALLOC_COLD_ALIGNED
)
607 /* # of partial AUs and # of clusters in those AUs */
608 n_partial_au
= amap
->n_au
- amap
->n_clean_au
- amap
->n_full_au
;
609 n_partial_freeclus
= fsi
->num_clusters
- fsi
->used_clusters
-
610 amap
->clusters_per_au
* amap
->n_clean_au
;
612 /* Status of AUs : Full / Partial / Clean
613 If there are many partial (and badly fragmented) AUs,
614 the throughput will decrease extremely.
616 The follow code will treat those worst cases.
619 // XXX: AMAP heuristics
620 if ((amap
->n_clean_au
* 50 <= amap
->n_au
) &&
621 (n_partial_freeclus
*2) < (n_partial_au
*amap
->clusters_per_au
)) {
622 /* If clean AUs are fewer than 2% of n_au (80 AUs per 16GB)
623 and fragment ratio is more than 2 (AVG free_clusters=half AU)
625 disable clean-first allocation
626 enable VFAT-like sequential allocation
629 return ALLOC_COLD_SEQ
;
/* Packing thresholds derived from the mount-option packing ratio. */
636 #define PACKING_SOFTLIMIT (amap->option.packing_ratio)
637 #define PACKING_HARDLIMIT (amap->option.packing_ratio * 4)
/* NOTE(review): garbled extract; code kept byte-identical except that Korean
 * comments were translated to English. */
639 Pick a packing AU if needed.
640 Otherwise just return NULL
642 This function includes some heuristics.
/* Three packing modes are visible below:
 * 1) ALLOC_COLD_PACKING: best-fit a partial AU sized to num_to_wb (defrag),
 * 2) soft-limit: after PACKING_SOFTLIMIT aligned allocations, best-fit pack,
 * 3) hard-limit: after PACKING_HARDLIMIT, pop the largest AU and reposition.
 * *clu_to_skip tells the caller how many leading free clusters to skip. */
644 static inline AU_INFO_T
* amap_get_packing_au(AMAP_T
*amap
, int dest
, int num_to_wb
, int *clu_to_skip
)
646 AU_INFO_T
* au
= NULL
;
648 if (dest
== ALLOC_COLD_PACKING
) {
649 /* ALLOC_COLD_PACKING:
650 Packing-first mode for defrag.
651 Optimized to save clean AU
654 2) Smallest AU (w/ minimum free clusters)
657 if (num_to_wb
>= amap
->clusters_per_au
)
658 num_to_wb
= num_to_wb
% amap
->clusters_per_au
;
660 /* If this is commented out: exact-AU-size requests get a clean AU, the rest smallest-first */
662 num_to_wb
= 1; // Don't use clean AUs
664 au
= amap_find_cold_au_bestfit(amap
, num_to_wb
);
666 if (au
&& au
->free_clusters
== amap
->clusters_per_au
&& num_to_wb
> 1) {
667 // if au is clean then get a new partial one
668 au
= amap_find_cold_au_bestfit(amap
, 1);
672 amap
->n_need_packing
= 0;
673 amap_remove_cold_au(amap
, au
);
679 /* Heuristic packing:
680 This will improve QoS greatly.
682 Count # of AU_ALIGNED allocation.
683 If the number exceeds the specific threshold,
684 allocate on a partial AU or generate random I/O.
686 if ((PACKING_SOFTLIMIT
> 0) && \
687 (amap
->n_need_packing
>= PACKING_SOFTLIMIT
) && \
688 (num_to_wb
< (int)amap
->clusters_per_au
) ){
690 If num_to_wb (expected number to be allocated) is smaller than AU_SIZE,
694 // Back margin (heuristics)
695 if (num_to_wb
< amap
->clusters_per_au
/ 4)
696 num_to_wb
= amap
->clusters_per_au
/ 4;
698 au
= amap_find_cold_au_bestfit(amap
, num_to_wb
);
701 amap_remove_cold_au(amap
, au
);
703 MMSG("AMAP: packing (cnt: %d) / softlimit, "
704 "best-fit (num_to_wb: %d))\n",
705 amap
->n_need_packing
, num_to_wb
);
707 if (au
->free_clusters
> num_to_wb
) { // Best-fit search: this branch always hits
708 *clu_to_skip
= au
->free_clusters
- num_to_wb
;
709 /* otherwise don't skip */
711 amap
->n_need_packing
= 0;
716 if (PACKING_HARDLIMIT
!= 0 && \
717 amap
->n_need_packing
>= PACKING_HARDLIMIT
) {
718 /* Compulsory SLC flushing:
719 If there was no chance to do best-fit packing
720 and the # of AU-aligned allocation exceeds HARD threshold,
721 then pick a clean AU and generate a compulsory random I/O.
723 au
= amap_pop_cold_au_largest(amap
, amap
->clusters_per_au
);
726 MMSG("AMAP: packing (cnt: %d) / hard-limit, largest)\n",
727 amap
->n_need_packing
);
729 if (au
->free_clusters
>= 96) {
730 *clu_to_skip
= au
->free_clusters
/ 2;
731 MMSG("AMAP: cluster idx re-position\n");
733 amap
->n_need_packing
= 0;
738 /* Update # of clean AU allocation */
739 amap
->n_need_packing
++;
/* NOTE(review): garbled extract — retry/loop structure partially missing;
 * code kept byte-identical, comments only. */
745 - This function should be called only if there are one or more free clusters in the bdev.
/* Allocation strategy core: returns the TARGET_AU_T (cur_hot or cur_cold) to
 * allocate from. Hot requests (ALLOC_HOT) draw from the hot AU list; cold
 * requests pick a new AU via SEQ scan, packing heuristics, adjacent-clean
 * check, or largest-first pop, in that order. */
747 TARGET_AU_T
*amap_get_target_au(AMAP_T
*amap
, int dest
, int num_to_wb
)
752 if (++loop_count
>= 3) {
753 /* No space available (or AMAP consistency error)
754 This could happen because of the ignored AUs
756 (because the defrag daemon will not work if there is no enough space)
758 BUG_ON(amap
->slist_ignored
.next
== NULL
);
762 /* Hot clusters (DIR) */
763 if (dest
== ALLOC_HOT
) {
765 /* Working hot AU exist? */
766 if (amap
->cur_hot
.au
== NULL
|| amap
->cur_hot
.au
->free_clusters
== 0) {
769 if (amap
->total_fclu_hot
== 0) {
770 /* No more hot AU available */
776 au
= amap_find_hot_au_partial(amap
);
779 BUG_ON(au
->free_clusters
<= 0);
781 amap
->cur_hot
.au
= au
;
782 amap
->cur_hot
.idx
= 0;
783 amap
->cur_hot
.clu_to_skip
= 0;
786 /* Now allocate on a hot AU */
787 return &amap
->cur_hot
;
792 If amap->cur_cold.au has one or more free cluster(s),
793 then just return amap->cur_cold
795 if ( (!amap
->cur_cold
.au
) \
796 || (amap
->cur_cold
.idx
== amap
->clusters_per_au
) \
797 || (amap
->cur_cold
.au
->free_clusters
== 0)) {
799 AU_INFO_T
*au
= NULL
;
800 const AU_INFO_T
*old_au
= amap
->cur_cold
.au
;
801 int n_clu_to_skip
= 0;
804 ASSERT(!IS_AU_WORKING(old_au
, amap
));
805 // must be NOT WORKING AU. (only for information gathering)
808 /* Next target AU is needed:
809 There are 3 possible ALLOC options for cold AU
811 ALLOC_COLD_ALIGNED: Clean AU first, but heuristic packing is ON
812 ALLOC_COLD_PACKING: Packing AU first (usually for defrag)
813 ALLOC_COLD_SEQ : Sequential AU allocation (VFAT-like)
817 /* Experimental: Modify allocation destination if needed (ALIGNED => SEQ) */
818 // dest = amap_update_dest(amap, dest);
820 if ((dest
== ALLOC_COLD_SEQ
) && old_au
) {
821 int i_au
= old_au
->idx
+ 1;
823 while (i_au
!= old_au
->idx
) {
824 au
= GET_AU(amap
, i_au
);
826 if ((au
->free_clusters
> 0) &&
827 !IS_AU_HOT(au
, amap
) &&
828 !IS_AU_IGNORED(au
, amap
)) {
830 MMSG("AMAP: new cold AU(%d) with %d "
832 au
->idx
, au
->free_clusters
);
834 amap_remove_cold_au(amap
, au
);
/* Wrap-around to AU 0 when the scan runs past the last AU. */
838 if (i_au
>= amap
->n_au
)
842 // no cold AUs are available => Hot allocation
849 * Check if packing is needed
850 * (ALLOC_COLD_PACKING is treated by this function)
852 au
= amap_get_packing_au(amap
, dest
, num_to_wb
, &n_clu_to_skip
);
854 MMSG("AMAP: new cold AU(%d) with %d clusters "
855 "(packing)\n", au
->idx
, au
->free_clusters
);
859 /* ALLOC_COLD_ALIGNED */
860 /* Check if the adjacent AU is clean */
861 if (old_au
&& ((old_au
->idx
+ 1) < amap
->n_au
)) {
862 au
= GET_AU(amap
, old_au
->idx
+ 1);
864 if ((au
->free_clusters
== amap
->clusters_per_au
) &&
865 !IS_AU_HOT(au
, amap
) &&
866 !IS_AU_IGNORED(au
, amap
)) {
867 MMSG("AMAP: new cold AU(%d) with %d clusters "
868 "(adjacent)\n", au
->idx
, au
->free_clusters
);
869 amap_remove_cold_au(amap
, au
);
874 /* Clean or largest AU */
875 au
= amap_pop_cold_au_largest(amap
, 0);
877 //ASSERT(amap->total_fclu_hot == (fsi->num_clusters - fsi->used_clusters - 2));
882 MMSG("AMAP: New cold AU (%d) with %d clusters\n", \
883 au
->idx
, au
->free_clusters
);
/* Install the chosen AU as the working cold target. */
888 amap
->cur_cold
.au
= au
;
889 amap
->cur_cold
.idx
= 0;
890 amap
->cur_cold
.clu_to_skip
= n_clu_to_skip
;
893 return &amap
->cur_cold
;
/* NOTE(review): garbled extract; code kept byte-identical, comments only. */
896 /* Put and update target AU */
/* Post-allocation bookkeeping: updates clean/full AU counters, hot free-
 * cluster total, and returns an exhausted cold AU to the cold map (marking it
 * not-working via shead.head = NULL) while closing cur->idx. */
897 void amap_put_target_au(AMAP_T
*amap
, TARGET_AU_T
*cur
, int num_allocated
)
899 /* Update AMAP info vars. */
900 if (num_allocated
> 0 && \
901 (cur
->au
->free_clusters
+ num_allocated
) == amap
->clusters_per_au
)
902 // if the target AU was a clean AU before this allocation ...
904 if (num_allocated
> 0 && \
905 cur
->au
->free_clusters
== 0)
909 if (IS_AU_HOT(cur
->au
, amap
)) {
911 MMSG("AMAP: hot allocation at AU %d\n", cur
->au
->idx
);
912 amap
->total_fclu_hot
-= num_allocated
;
914 /* Intra-AU round-robin */
915 if (cur
->idx
>= amap
->clusters_per_au
)
918 /* No more space available */
919 if (cur
->au
->free_clusters
== 0)
924 ASSERT(IS_AU_WORKING(cur
->au
, amap
));
926 if (cur
->idx
>= amap
->clusters_per_au
|| cur
->au
->free_clusters
== 0) {
927 /* It should be inserted back to AU MAP */
928 cur
->au
->shead
.head
= NULL
; // SET_AU_NOT_WORKING
929 amap_add_cold_au(amap
, cur
->au
);
931 // cur->au = NULL; // This value will be used for the next AU selection
932 cur
->idx
= amap
->clusters_per_au
; // AU closing
/* NOTE(review): garbled extract; code kept byte-identical, comments only. */
939 /* Reposition target->idx for packing
942 Skip (num_to_skip) free clusters in (cur->au)
/* Advances cur->idx past num_to_skip FREE clusters of the working AU by
 * reading FAT entries one at a time; errors out (EMSG) if asked to skip as
 * many free clusters as the AU has, or if the AU end is reached. */
944 static inline int amap_skip_cluster(struct super_block
*sb
, TARGET_AU_T
*cur
, int num_to_skip
)
946 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
948 MMSG_VAR(int num_to_skip_orig
= num_to_skip
);
950 if (num_to_skip
>= cur
->au
->free_clusters
) {
951 EMSG("AMAP(%s): skip mis-use. amap_566\n", __func__
);
955 clu
= CLU_of_i_AU(amap
, cur
->au
->idx
, cur
->idx
);
957 while (num_to_skip
> 0) {
958 if (clu
>= CLUS_BASE
) {
960 * If AMAP's integrity is okay,
961 * we don't need to check if (clu < fsi->num_clusters)
964 if (fat_ent_get(sb
, clu
, &read_clu
))
967 if (IS_CLUS_FREE(read_clu
))
975 if (cur
->idx
>= amap
->clusters_per_au
) {
976 /* End of AU (Not supposed) */
977 EMSG("AMAP: Skip - End of AU?! (amap_596)\n");
983 MMSG("AMAP: Skip_clusters (%d skipped => %d, among %d free clus)\n",\
984 num_to_skip_orig
, cur
->idx
, cur
->au
->free_clusters
);
/* NOTE(review): garbled extract — outer loop and several error paths are
 * missing lines. Code kept byte-identical except one Korean comment
 * translated to English. */
990 /* AMAP-based allocation function for FAT32 */
/* Allocates up to num_alloc clusters via the AU strategy: picks a target AU
 * (amap_get_target_au), optionally skips clusters for packing, then walks the
 * AU marking free FAT entries EOF and chaining them into p_chain. Returns the
 * number of clusters actually allocated, or -EIO on FAT read failure. */
991 s32
amap_fat_alloc_cluster(struct super_block
*sb
, s32 num_alloc
, CHAIN_T
*p_chain
, int dest
)
993 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
994 TARGET_AU_T
*cur
= NULL
;
995 AU_INFO_T
*target_au
= NULL
; /* Allocation target AU */
996 u32 last_clu
= CLUS_EOF
, read_clu
;
997 s32 new_clu
; // Max. 2G clusters
998 s32 num_allocated
= 0, num_allocated_each
= 0;
999 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
1002 BUG_ON(IS_CLUS_EOF(fsi
->used_clusters
));
1004 p_chain
->dir
= CLUS_EOF
;
1006 if ((fsi
->used_clusters
+ num_alloc
) > (fsi
->num_clusters
- CLUS_BASE
)) {
1007 /* Reserved count management error
1008 or called by dir. management function on fully filled disk */
1009 num_alloc
= fsi
->num_clusters
- fsi
->used_clusters
- CLUS_BASE
;
1011 if (unlikely(num_alloc
< 0)) {
1012 sdfat_fs_error_ratelimit(sb
,
1013 "AMAP(%s): invalid used clusters(t:%u,u:%u)\n",
1014 __func__
, fsi
->num_clusters
, fsi
->used_clusters
);
1024 // spin_lock(&amap->amap_lock);
1027 /* Allocation strategy implemented */
1028 cur
= amap_get_target_au(amap
, dest
, fsi
->reserved_clusters
);
1029 if (unlikely(!cur
)) {
1030 // There is no available AU (only ignored-AU are left)
1031 sdfat_msg(sb
, KERN_ERR
, "AMAP Allocator: no avaialble AU.");
1035 /* If there are clusters to skip */
1036 if (cur
->clu_to_skip
> 0) {
1037 if (amap_skip_cluster(sb
, &amap
->cur_cold
, cur
->clu_to_skip
))
1039 cur
->clu_to_skip
= 0;
1042 target_au
= cur
->au
;
1046 * cur->au : target AU info pointer
1047 * cur->idx : the intra-cluster idx in the AU to start from
1051 BUG_ON(!cur
->au
->free_clusters
);
1052 BUG_ON(cur
->idx
>= amap
->clusters_per_au
);
1054 num_allocated_each
= 0;
1055 new_clu
= CLU_of_i_AU(amap
, target_au
->idx
, cur
->idx
);
1058 /* Allocate at the target AU */
1059 if ((new_clu
>= CLUS_BASE
) && (new_clu
< fsi
->num_clusters
)) {
1060 if (fat_ent_get(sb
, new_clu
, &read_clu
))
1061 // spin_unlock(&amap->amap_lock);
1062 return -EIO
; // goto err_and_return
1064 if (IS_CLUS_FREE(read_clu
)) {
1065 BUG_ON(GET_AU(amap
, i_AU_of_CLU(amap
, new_clu
)) != target_au
);
1067 /* Free cluster found */
1068 if (fat_ent_set(sb
, new_clu
, CLUS_EOF
))
1071 num_allocated_each
++;
/* First cluster of the chain becomes p_chain->dir; later ones are linked. */
1073 if (IS_CLUS_EOF(p_chain
->dir
))
1074 p_chain
->dir
= new_clu
;
1076 if (fat_ent_set(sb
, last_clu
, new_clu
))
1081 /* Update au info */
1082 target_au
->free_clusters
--;
1091 if ((cur
->idx
>= amap
->clusters_per_au
) || !(target_au
->free_clusters
))
1093 } while(num_allocated_each
< num_alloc
);
1096 /* Update strategy info */
1097 amap_put_target_au(amap
, cur
, num_allocated_each
);
1100 num_allocated
+= num_allocated_each
;
1101 fsi
->used_clusters
+= num_allocated_each
;
1102 num_alloc
-= num_allocated_each
;
1108 // spin_unlock(&amap->amap_lock);
1109 return num_allocated
;
1113 /* Free cluster for FAT32 (not implemented yet) */
/* Stub: body not implemented (and not visible in this extract). */
1114 s32
amap_free_cluster(struct super_block
*sb
, CHAIN_T
*p_chain
, s32 do_relse
)
/* NOTE(review): garbled extract; code kept byte-identical, comments only. */
1121 This is called by fat_free_cluster()
1122 to update AMAP info.
/* Account one freed cluster back into its AU: hot AUs update total_fclu_hot;
 * ordinary cold AUs are re-bucketed (remove, ++, add); working/ignored AUs
 * just get the counter bump. Clean/full AMAP counters follow at the end. */
1124 s32
amap_release_cluster(struct super_block
*sb
, u32 clu
)
1126 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
1130 // spin_lock(&amap->amap_lock);
1132 /* Update AU info */
1133 i_au
= i_AU_of_CLU(amap
, clu
);
1134 BUG_ON(i_au
>= amap
->n_au
);
1135 au
= GET_AU(amap
, i_au
);
1136 BUG_ON(au
->free_clusters
>= amap
->clusters_per_au
);
1138 if (IS_AU_HOT(au
, amap
)) {
1139 MMSG("AMAP: Hot cluster freed\n");
1140 au
->free_clusters
++;
1141 amap
->total_fclu_hot
++;
1142 } else if (!IS_AU_WORKING(au
, amap
) && !IS_AU_IGNORED(au
, amap
)) {
1143 /* Ordinary AU - update AU tree */
1144 // Can be optimized by implementing amap_update_au
1145 amap_remove_cold_au(amap
, au
);
1146 au
->free_clusters
++;
1147 amap_add_cold_au(amap
, au
);
1149 au
->free_clusters
++;
1152 /* Update AMAP info */
1153 if (au
->free_clusters
== amap
->clusters_per_au
)
1155 if (au
->free_clusters
== 1)
1158 // spin_unlock(&amap->amap_lock);
1164 Check if the cluster is in a working AU
1165 The caller should hold sb lock.
1166 This func. should be used only if smart allocation is on
/* Predicate: is the AU containing 'clu' currently a working (in-use) AU? */
1168 s32
amap_check_working(struct super_block
*sb
, u32 clu
)
1170 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
1175 au
= GET_AU(amap
, i_AU_of_CLU(amap
, clu
));
1177 return (IS_AU_WORKING(au
, amap
));
1182 Return the # of free clusters in that AU
/* Query: free-cluster count of the AU containing 'clu'. */
1184 s32
amap_get_freeclus(struct super_block
*sb
, u32 clu
)
1186 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
1191 au
= GET_AU(amap
, i_AU_of_CLU(amap
, clu
));
1193 return ((s32
)au
->free_clusters
);
/* NOTE(review): garbled extract; code kept byte-identical, comments only. */
1198 Add the AU containing 'clu' to the ignored AU list.
1199 The AU will not be used by the allocator.
1201 XXX: Ignored counter needed
/* Move a cold AU to slist_ignored so the allocator skips it. Hot and working
 * AUs are rejected (early-exit branches; return values not visible here). */
1203 s32
amap_mark_ignore(struct super_block
*sb
, u32 clu
)
1205 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
1210 au
= GET_AU(amap
, i_AU_of_CLU(amap
, clu
));
1213 if (IS_AU_HOT(au
, amap
)) {
1214 // Doesn't work with hot AUs
1216 } else if (IS_AU_WORKING(au
, amap
)) {
1220 //BUG_ON(IS_AU_IGNORED(au, amap) && (GET_IGN_CNT(au) == 0));
1221 if (IS_AU_IGNORED(au
, amap
))
1224 amap_remove_cold_au(amap
, au
);
1225 amap_insert_to_list(au
, &amap
->slist_ignored
);
1227 BUG_ON(!IS_AU_IGNORED(au
, amap
));
1231 MMSG("AMAP: Mark ignored AU (%d)\n", au
->idx
);
1238 This function could be used only on IGNORED AUs.
1239 The caller should care whether it's ignored or not before using this func.
/* Inverse of amap_mark_ignore: move the AU back from slist_ignored into the
 * cold map. BUG_ON enforces the ignored precondition. */
1241 s32
amap_unmark_ignore(struct super_block
*sb
, u32 clu
)
1243 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
1248 au
= GET_AU(amap
, i_AU_of_CLU(amap
, clu
));
1250 BUG_ON(!IS_AU_IGNORED(au
, amap
));
1251 // BUG_ON(GET_IGN_CNT(au) == 0);
1253 amap_remove_from_list(au
, &amap
->slist_ignored
);
1254 amap_add_cold_au(amap
, au
);
1256 BUG_ON(IS_AU_IGNORED(au
, amap
));
1260 MMSG("AMAP: Unmark ignored AU (%d)\n", au
->idx
);
/* NOTE(review): garbled extract; code kept byte-identical, comments only. */
1266 Unmark all ignored AU
1267 This will return # of unmarked AUs
/* Drains slist_ignored: each entry is validated, removed from the ignored
 * list, and re-inserted into the cold map; n (counter, declaration not
 * visible here) is reported at the end. */
1269 s32
amap_unmark_ignore_all(struct super_block
*sb
)
1271 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
1272 struct slist_head
*entry
;
1278 entry
= amap
->slist_ignored
.next
;
1280 au
= list_entry(entry
, AU_INFO_T
, shead
);
1282 BUG_ON(au
!= GET_AU(amap
, au
->idx
));
1283 BUG_ON(!IS_AU_IGNORED(au
, amap
));
1285 //CLEAR_IGN_CNT(au);
1287 amap_remove_from_list(au
, &amap
->slist_ignored
);
1288 amap_add_cold_au(amap
, au
);
1290 MMSG("AMAP: Unmark ignored AU (%d)\n", au
->idx
);
1293 entry
= amap
->slist_ignored
.next
;
1296 BUG_ON(amap
->slist_ignored
.next
!= NULL
);
1297 MMSG("AMAP: unmark_ignore_all, total %d AUs\n", n
);
1303 * @fn amap_get_au_stat
1304 * @brief report AUs status depending on mode
1305 * @return positive on success, 0 otherwise
1306 * @param sbi super block info
1307 * @param mode TOTAL, CLEAN and FULL
/* Stat accessor: returns the counter matching mode. NOTE(review): the
 * VOL_AU_STAT_TOTAL return value line is missing from this extract. */
1309 u32
amap_get_au_stat(struct super_block
*sb
, s32 mode
)
1311 AMAP_T
*amap
= SDFAT_SB(sb
)->fsi
.amap
;
1316 if (mode
== VOL_AU_STAT_TOTAL
)
1318 else if (mode
== VOL_AU_STAT_CLEAN
)
1319 return amap
->n_clean_au
;
1320 else if (mode
== VOL_AU_STAT_FULL
)
1321 return amap
->n_full_au
;