/*
 * (extraction residue, preserved for provenance:)
 * V4L/DVB (13571): v4l: Adding Digital Video Timings APIs
 * [GitHub/LineageOS/android_kernel_samsung_universal7580.git] / fs / sdfat / amap_smart.c
 */
1 /*
2 * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
17 * MA 02110-1301, USA.
18 */
19
20 /************************************************************************/
21 /* */
22 /* PROJECT : exFAT & FAT12/16/32 File System */
23 /* FILE : amap_smart.c */
24 /* PURPOSE : FAT32 Smart allocation code for sdFAT */
25 /* */
26 /*----------------------------------------------------------------------*/
27 /* NOTES */
28 /* */
29 /* */
30 /************************************************************************/
31
32 #include <linux/slab.h>
33 #include <linux/vmalloc.h>
34
35 #include "sdfat.h"
36 #include "core.h"
37 #include "amap_smart.h"
38
39 /* AU list related functions */
40 static inline void amap_list_del(struct list_head *entry)
41 {
42 __list_del(entry->prev, entry->next);
43
44 /* Will be used to check if the entry is a single entry(selected) */
45 entry->prev = NULL;
46 entry->next = NULL;
47 }
48
49 static inline int amap_insert_to_list(AU_INFO_T *au, struct slist_head *shead)
50 {
51 struct slist_head *entry = &au->shead;
52
53 ASSERT(!entry->head);
54
55 entry->next = shead->next;
56 entry->head = shead;
57
58 shead->next = entry;
59
60 return 0;
61 }
62
63 static inline int amap_remove_from_list(AU_INFO_T *au, struct slist_head *shead)
64 {
65 struct slist_head *entry = &au->shead;
66 struct slist_head *iter;
67
68 BUG_ON(entry->head != shead);
69
70 iter = shead;
71
72 while (iter->next) {
73 if (iter->next == entry) {
74 // iter->next = iter->next->next
75 iter->next = entry->next;
76
77 entry->next = NULL;
78 entry->head = NULL;
79 return 0;
80 }
81 iter = iter->next;
82 }
83
84 BUG_ON("Not reachable");
85 }
86
87 /* Full-linear serach => Find AU with max. number of fclu */
88 static inline AU_INFO_T* amap_find_hot_au_largest(struct slist_head *shead)
89 {
90 struct slist_head *iter;
91 uint16_t max_fclu = 0;
92 AU_INFO_T *entry, *ret = NULL;
93
94 ASSERT(shead->head == shead); /* Singly-list condition */
95 ASSERT(shead->next != shead);
96
97 iter = shead->next;
98
99 while (iter) {
100 entry = list_entry(iter, AU_INFO_T, shead);
101
102 if (entry->free_clusters > max_fclu) {
103 max_fclu = entry->free_clusters;
104 ret = entry;
105 }
106
107 iter = iter->next;
108 }
109
110 return ret;
111 }
112
113 /* Find partially used AU with max. number of fclu.
114 If there is no partial AU available, pick a clean one */
115 static inline AU_INFO_T* amap_find_hot_au_partial(AMAP_T *amap)
116 {
117 struct slist_head *iter;
118 uint16_t max_fclu = 0;
119 AU_INFO_T *entry, *ret = NULL;
120
121 iter = &amap->slist_hot;
122 ASSERT(iter->head == iter); /* Singly-list condition */
123 ASSERT(iter->next != iter);
124
125 iter = iter->next;
126
127 while (iter) {
128 entry = list_entry(iter, AU_INFO_T, shead);
129
130 if (entry->free_clusters > max_fclu) {
131 if (entry->free_clusters < amap->clusters_per_au) {
132 max_fclu = entry->free_clusters;
133 ret = entry;
134 } else {
135 if (!ret)
136 ret = entry;
137 }
138 }
139
140 iter = iter->next;
141 }
142
143 return ret;
144 }
145
146
147
148
149 /*
150 Size-base AU management functions
151 */
152
153 /*
154 Add au into cold AU MAP
155 au: an isolated (not in a list) AU data structure
156 */
157 int amap_add_cold_au(AMAP_T *amap, AU_INFO_T *au)
158 {
159 FCLU_NODE_T *fclu_node = NULL;
160
161 /* Check if a single entry */
162 BUG_ON(au->head.prev);
163
164 /* Ignore if the au is full */
165 if (!au->free_clusters)
166 return 0;
167
168 /* Find entry */
169 fclu_node = NODE(au->free_clusters, amap);
170
171 /* Insert to the list */
172 list_add_tail(&(au->head), &(fclu_node->head));
173
174 /* Update fclu_hint (Increase) */
175 if (au->free_clusters > amap->fclu_hint)
176 amap->fclu_hint = au->free_clusters;
177
178 return 0;
179 }
180
181 /*
182 Remove an AU from AU MAP
183 */
184 int amap_remove_cold_au(AMAP_T *amap, AU_INFO_T* au)
185 {
186 struct list_head *prev = au->head.prev;
187
188 /* Single entries are not managed in lists */
189 if (!prev) {
190 BUG_ON(au->free_clusters > 0);
191 return 0;
192 }
193
194 /* remove from list */
195 amap_list_del(&(au->head));
196
197 return 0;
198 }
199
200
201 /* "Find" best fit AU
202 returns NULL if there is no AU w/ enough free space.
203
204 This function doesn't change AU status.
205 The caller should call amap_remove_cold_au() if needed.
206 */
207 AU_INFO_T* amap_find_cold_au_bestfit(AMAP_T *amap, uint16_t free_clusters)
208 {
209 AU_INFO_T *au = NULL;
210 FCLU_NODE_T *fclu_iter;
211
212 if (free_clusters <= 0 || free_clusters > amap->clusters_per_au) {
213 EMSG("AMAP: amap_find_cold_au_bestfit / unexpected arg. (%d)\n",
214 free_clusters);
215 return NULL;
216 }
217
218 fclu_iter = NODE(free_clusters, amap);
219
220 if (amap->fclu_hint < free_clusters) {
221 /* There is no AUs with enough free_clusters */
222 return NULL;
223 }
224
225 /* Naive Hash management (++) */
226 do {
227 if (!list_empty(&fclu_iter->head)) {
228 struct list_head *first = fclu_iter->head.next;
229
230 au = list_entry(first, AU_INFO_T, head);
231
232 break;
233 }
234
235 fclu_iter++;
236 } while (fclu_iter < (amap->fclu_nodes + amap->clusters_per_au));
237
238
239 // BUG_ON(au->free_clusters < 0);
240 BUG_ON(au && (au->free_clusters > amap->clusters_per_au));
241
242 return au;
243 }
244
245
246 /* "Pop" best fit AU
247 returns NULL if there is no AU w/ enough free space.
248 The returned AU will not be in the list anymore.
249 */
250 AU_INFO_T* amap_pop_cold_au_bestfit(AMAP_T *amap, uint16_t free_clusters)
251 {
252 /* Naive implementation */
253 AU_INFO_T* au;
254
255 au = amap_find_cold_au_bestfit(amap, free_clusters);
256 if (au)
257 amap_remove_cold_au(amap, au);
258
259 return au;
260 }
261
262
263
264 /* Pop the AU with the largest free space
265
266 search from 'start_fclu' to 0
267 (target freecluster : -1 for each step)
268
269 start_fclu = 0 means to search from the max. value
270 */
271 AU_INFO_T* amap_pop_cold_au_largest(AMAP_T *amap, uint16_t start_fclu)
272 {
273 AU_INFO_T* au = NULL;
274 FCLU_NODE_T *fclu_iter;
275
276 if (!start_fclu)
277 start_fclu = amap->clusters_per_au;
278 if (start_fclu > amap->clusters_per_au)
279 start_fclu = amap->clusters_per_au;
280
281 /* Use hint (search start point) */
282 if (amap->fclu_hint < start_fclu)
283 fclu_iter = NODE(amap->fclu_hint, amap);
284 else
285 fclu_iter = NODE(start_fclu, amap);
286
287 /* Naive Hash management */
288 do {
289 if (!list_empty(&fclu_iter->head)) {
290 struct list_head *first = fclu_iter->head.next;
291
292 au = list_entry(first, AU_INFO_T, head);
293 // BUG_ON((au < amap->entries) || ((amap->entries + amap->n_au) <= au));
294
295 amap_list_del(first);
296
297 // (Hint) Possible maximum value of free clusters (among cold)
298 /* if it wasn't the whole search, don't update fclu_hint */
299 if (start_fclu == amap->clusters_per_au)
300 amap->fclu_hint = au->free_clusters;
301
302 break;
303 }
304
305 fclu_iter--;
306 } while (amap->fclu_nodes <= fclu_iter);
307
308 return au;
309 }
310
311
312
313 /*
314 ===============================================
315 Allocation Map related functions
316 ===============================================
317 */
318
319 /* Create AMAP related data structure (mount time) */
320 int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hidden_sect)
321 {
322 FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
323 AMAP_T *amap;
324 int total_used_clusters;
325 int n_au_table = 0;
326 int i, i_clu, i_au;
327 int i_au_root = -1, i_au_hot_from = INT_MAX;
328 u32 misaligned_sect = hidden_sect;
329
330 BUG_ON(!fsi->bd_opened);
331
332 if (fsi->amap)
333 return -EEXIST;
334
335 /* Check conditions */
336 if (fsi->vol_type != FAT32) {
337 sdfat_msg(sb, KERN_ERR, "smart allocation is only available "
338 "with fat32-fs");
339 return -ENOTSUPP;
340 }
341
342 if (fsi->num_sectors < AMAP_MIN_SUPPORT_SECTORS) {
343 sdfat_msg(sb, KERN_ERR, "smart allocation is only available "
344 "with sectors above %d", AMAP_MIN_SUPPORT_SECTORS);
345 return -ENOTSUPP;
346 }
347
348 /* AU size must be a multiple of clu_size */
349 if ((sect_per_au <= 0) || (sect_per_au & (fsi->sect_per_clus - 1))) {
350 sdfat_msg(sb, KERN_ERR,
351 "invalid AU size (sect_per_au : %u, "
352 "sect_per_clus : %u) "
353 "please re-format for performance.",
354 sect_per_au, fsi->sect_per_clus);
355 return -EINVAL;
356 }
357
358 /* the start sector of this partition must be a multiple of clu_size */
359 if (misaligned_sect & (fsi->sect_per_clus - 1)) {
360 sdfat_msg(sb, KERN_ERR,
361 "misaligned part (start sect : %u, "
362 "sect_per_clus : %u) "
363 "please re-format for performance.",
364 misaligned_sect, fsi->sect_per_clus);
365 return -EINVAL;
366 }
367
368 /* data start sector must be a multiple of clu_size */
369 if (fsi->data_start_sector & (fsi->sect_per_clus - 1)) {
370 sdfat_msg(sb, KERN_ERR,
371 "misaligned data area (start sect : %u, "
372 "sect_per_clus : %u) "
373 "please re-format for performance.",
374 fsi->data_start_sector, fsi->sect_per_clus);
375 return -EINVAL;
376 }
377
378 misaligned_sect &= (sect_per_au - 1);
379
380 /* Allocate data structrues */
381 amap = kzalloc(sizeof(AMAP_T), GFP_NOIO);
382 if (!amap)
383 return -ENOMEM;
384
385 amap->sb = sb;
386
387 amap->n_au = (fsi->num_sectors + misaligned_sect + sect_per_au - 1) / sect_per_au;
388 amap->n_clean_au = 0;
389 amap->n_full_au = 0;
390
391 /* Reflect block-partition align first,
392 then partition-data_start align */
393 amap->clu_align_bias = (misaligned_sect / fsi->sect_per_clus);
394 amap->clu_align_bias += (fsi->data_start_sector >> fsi->sect_per_clus_bits) - CLUS_BASE;
395 amap->clusters_per_au = sect_per_au / fsi->sect_per_clus;
396
397 /* That is,
398 * the size of cluster is at least 4KB if the size of AU is 4MB
399 */
400 if (amap->clusters_per_au > MAX_CLU_PER_AU) {
401 sdfat_log_msg(sb, KERN_INFO,
402 "too many clusters per AU (clus/au:%d > %d).",
403 amap->clusters_per_au,
404 MAX_CLU_PER_AU);
405 }
406
407 /* is it needed? why here? */
408 // set_sb_dirty(sb);
409
410 spin_lock_init(&amap->amap_lock);
411
412 amap->option.packing_ratio = pack_ratio;
413 amap->option.au_size = sect_per_au;
414 amap->option.au_align_factor = hidden_sect;
415
416
417 /* Allocate AU info table */
418 n_au_table = (amap->n_au + N_AU_PER_TABLE - 1) / N_AU_PER_TABLE;
419 amap->au_table = kmalloc(sizeof(AU_INFO_T *) * n_au_table, GFP_NOIO);
420
421 if (!amap->au_table) {
422 sdfat_msg(sb, KERN_ERR,
423 "failed to alloc amap->au_table\n");
424 kfree(amap);
425 return -ENOMEM;
426 }
427
428 for (i = 0; i < n_au_table; i++)
429 amap->au_table[i] = (AU_INFO_T *)get_zeroed_page(GFP_NOIO);
430
431 /* Allocate buckets indexed by # of free clusters */
432 amap->fclu_order = get_order(sizeof(FCLU_NODE_T) * amap->clusters_per_au);
433
434 // XXX: amap->clusters_per_au limitation is 512 (w/ 8 byte list_head)
435 sdfat_log_msg(sb, KERN_INFO, "page orders for AU nodes : %d "
436 "(clus_per_au : %d, node_size : %lu)",
437 amap->fclu_order,
438 amap->clusters_per_au,
439 (unsigned long)sizeof(FCLU_NODE_T));
440
441 if (!amap->fclu_order)
442 amap->fclu_nodes = (FCLU_NODE_T*)get_zeroed_page(GFP_NOIO);
443 else
444 amap->fclu_nodes = (FCLU_NODE_T*)vzalloc(PAGE_SIZE << amap->fclu_order);
445
446 amap->fclu_hint = amap->clusters_per_au;
447
448 /* Hot AU list, ignored AU list */
449 amap->slist_hot.next = NULL;
450 amap->slist_hot.head = &amap->slist_hot;
451 amap->total_fclu_hot = 0;
452
453 amap->slist_ignored.next = NULL;
454 amap->slist_ignored.head = &amap->slist_ignored;
455
456 /* Strategy related vars. */
457 amap->cur_cold.au = NULL;
458 amap->cur_hot.au = NULL;
459 amap->n_need_packing = 0;
460
461
462 /* Build AMAP info */
463 total_used_clusters = 0; // Count # of used clusters
464
465 i_au_root = i_AU_of_CLU(amap, fsi->root_dir);
466 i_au_hot_from = amap->n_au - (SMART_ALLOC_N_HOT_AU - 1);
467
468 for (i = 0; i < amap->clusters_per_au; i++)
469 INIT_LIST_HEAD(&amap->fclu_nodes[i].head);
470
471
472 /*
473 Thanks to kzalloc()
474 amap->entries[i_au].free_clusters = 0;
475 amap->entries[i_au].head.prev = NULL;
476 amap->entries[i_au].head.next = NULL;
477 */
478
479 /* Parse FAT table */
480 for (i_clu = CLUS_BASE; i_clu < fsi->num_clusters; i_clu++){
481 u32 clu_data;
482 AU_INFO_T *au;
483
484 if (fat_ent_get(sb, i_clu, &clu_data)) {
485 sdfat_msg(sb, KERN_ERR,
486 "failed to read fat entry(%u)\n", i_clu);
487 goto free_and_eio;
488 }
489
490 if (IS_CLUS_FREE(clu_data)) {
491 au = GET_AU(amap, i_AU_of_CLU(amap, i_clu));
492 au->free_clusters++;
493 } else
494 total_used_clusters++;
495 }
496
497 /* Build AU list */
498 for (i_au = 0; i_au < amap->n_au; i_au++){
499 AU_INFO_T *au = GET_AU(amap, i_au);
500
501 au->idx = i_au;
502 BUG_ON(au->free_clusters > amap->clusters_per_au);
503
504 if (au->free_clusters == amap->clusters_per_au)
505 amap->n_clean_au++;
506 else if (au->free_clusters == 0)
507 amap->n_full_au++;
508
509 /* If hot, insert to the hot list */
510 if (i_au >= i_au_hot_from) {
511 amap_add_hot_au(amap, au);
512 amap->total_fclu_hot += au->free_clusters;
513 } else if (i_au != i_au_root || SMART_ALLOC_N_HOT_AU == 0) {
514 /* Otherwise, insert to the free cluster hash */
515 amap_add_cold_au(amap, au);
516 }
517 }
518
519 /* Hot list -> (root) -> (last) -> (last - 1) -> ... */
520 if (i_au_root >= 0 && SMART_ALLOC_N_HOT_AU > 0) {
521 amap_add_hot_au(amap, GET_AU(amap, i_au_root));
522 amap->total_fclu_hot += GET_AU(amap, i_au_root)->free_clusters;
523 }
524
525
526 fsi->amap = amap;
527 fsi->used_clusters = total_used_clusters;
528
529 sdfat_msg(sb, KERN_INFO,
530 "AMAP: Smart allocation enabled (opt : %u / %u / %u)",
531 amap->option.au_size, amap->option.au_align_factor,
532 amap->option.packing_ratio);
533
534 /* Debug purpose - check */
535 //{
536 //u32 used_clusters;
537 //fat_count_used_clusters(sb, &used_clusters)
538 //ASSERT(used_clusters == total_used_clusters);
539 //}
540
541 return 0;
542
543
544 free_and_eio:
545 if (amap) {
546 if (amap->au_table) {
547 for (i = 0; i < n_au_table; i++)
548 free_page((unsigned long)amap->au_table[i]);
549 kfree(amap->au_table);
550 }
551 if (amap->fclu_nodes) {
552 if (!amap->fclu_order)
553 free_page((unsigned long)amap->fclu_nodes);
554 else
555 vfree(amap->fclu_nodes);
556 }
557 kfree(amap);
558 }
559 return -EIO;
560 }
561
562
563 /* Free AMAP related structure */
564 void amap_destroy(struct super_block *sb)
565 {
566 AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
567 int n_au_table;
568
569 if (!amap)
570 return;
571
572 DMSG("%s\n", __func__);
573
574 n_au_table = (amap->n_au + N_AU_PER_TABLE - 1) / N_AU_PER_TABLE;
575
576 if (amap->au_table) {
577 int i;
578 for (i = 0; i < n_au_table; i++)
579 free_page((unsigned long)amap->au_table[i]);
580
581 kfree(amap->au_table);
582 }
583 if (!amap->fclu_order)
584 free_page((unsigned long)amap->fclu_nodes);
585 else
586 vfree(amap->fclu_nodes);
587 kfree(amap);
588 SDFAT_SB(sb)->fsi.amap = NULL;
589
590 return;
591 }
592
593
594 /*
595 Check status of FS
596 and change destination if needed to disable AU-aligned alloc.
597 (from ALLOC_COLD_ALIGNED to ALLOC_COLD_SEQ)
598 */
599 static inline int amap_update_dest(AMAP_T *amap, int ori_dest)
600 {
601 FS_INFO_T *fsi = &(SDFAT_SB(amap->sb)->fsi);
602 int n_partial_au, n_partial_freeclus;
603
604 if (ori_dest != ALLOC_COLD_ALIGNED)
605 return ori_dest;
606
607 /* # of partial AUs and # of clusters in those AUs */
608 n_partial_au = amap->n_au - amap->n_clean_au - amap->n_full_au;
609 n_partial_freeclus = fsi->num_clusters - fsi->used_clusters -
610 amap->clusters_per_au * amap->n_clean_au;
611
612 /* Status of AUs : Full / Partial / Clean
613 If there are many partial (and badly fragmented) AUs,
614 the throughput will decrease extremly.
615
616 The follow code will treat those worst cases.
617 */
618
619 // XXX: AMAP heuristics
620 if ((amap->n_clean_au * 50 <= amap->n_au) &&
621 (n_partial_freeclus*2) < (n_partial_au*amap->clusters_per_au)) {
622 /* If clean AUs are fewer than 2% of n_au (80 AUs per 16GB)
623 and fragment ratio is more than 2 (AVG free_clusters=half AU)
624
625 disable clean-first allocation
626 enable VFAT-like sequential allocation
627 */
628
629 return ALLOC_COLD_SEQ;
630 }
631
632 return ori_dest;
633 }
634
635
636 #define PACKING_SOFTLIMIT (amap->option.packing_ratio)
637 #define PACKING_HARDLIMIT (amap->option.packing_ratio * 4)
/*
 * Pick a packing AU if needed.
 * Otherwise just return NULL
 *
 * "Packing" deliberately reuses partially-filled AUs instead of clean
 * ones, to limit fragmentation.  This function includes some heuristics.
 *
 * @amap:        allocation map
 * @dest:        allocation destination (ALLOC_COLD_* mode)
 * @num_to_wb:   expected number of clusters to be written back
 * @clu_to_skip: out-param - # of free clusters to skip inside the chosen AU
 *
 * Returns an AU already detached from the cold map, or NULL.
 */
static inline AU_INFO_T* amap_get_packing_au(AMAP_T *amap, int dest, int num_to_wb, int *clu_to_skip)
{
	AU_INFO_T* au = NULL;

	if (dest == ALLOC_COLD_PACKING) {
		/* ALLOC_COLD_PACKING:
		   Packing-first mode for defrag.
		   Optimized to save clean AU

		   1) best-fit AU
		   2) Smallest AU (w/ minimum free clusters)
		 */

		if (num_to_wb >= amap->clusters_per_au)
			num_to_wb = num_to_wb % amap->clusters_per_au;

		/* (translated from Korean) If this were commented out: a request
		   of exactly one AU gets a clean AU, all others start from the
		   smallest partial AU. */
		if (num_to_wb == 0)
			num_to_wb = 1;	// Don't use clean AUs

		au = amap_find_cold_au_bestfit(amap, num_to_wb);

		if (au && au->free_clusters == amap->clusters_per_au && num_to_wb > 1) {
			// if au is clean then get a new partial one
			au = amap_find_cold_au_bestfit(amap, 1);
		}

		if (au) {
			amap->n_need_packing = 0;
			amap_remove_cold_au(amap, au);
			return au;
		}
	}


	/* Heuristic packing:
	   This will improve QoS greatly.

	   Count # of AU_ALLIGNED allocation.
	   If the number exceeds the specific threshold,
	   allocate on a partial AU or generate random I/O.
	 */
	if ((PACKING_SOFTLIMIT > 0) && \
		(amap->n_need_packing >= PACKING_SOFTLIMIT) && \
		(num_to_wb < (int)amap->clusters_per_au) ){
		/* Best-fit packing:
		   If num_to_wb (expected number to be allocated) is smaller than AU_SIZE,
		   find a best-fit AU.
		 */

		// Back margin (heuristics)
		if (num_to_wb < amap->clusters_per_au / 4)
			num_to_wb = amap->clusters_per_au / 4;

		au = amap_find_cold_au_bestfit(amap, num_to_wb);

		if ((au != NULL)) {
			amap_remove_cold_au(amap, au);

			MMSG("AMAP: packing (cnt: %d) / softlimit, "
				"best-fit (num_to_wb: %d))\n",
				amap->n_need_packing, num_to_wb);

			/* (translated) best-fit search: this branch always hits */
			if (au->free_clusters > num_to_wb) {
				*clu_to_skip = au->free_clusters - num_to_wb;
				/* otherwise don't skip */
			}
			amap->n_need_packing = 0;
			return au;
		}
	}

	if (PACKING_HARDLIMIT != 0 && \
		amap->n_need_packing >= PACKING_HARDLIMIT) {
		/* Compulsory SLC flushing:
		   If there was no chance to do best-fit packing
		   and the # of AU-aligned allocation exceeds HARD threshold,
		   then pick a clean AU and generate a compulsory random I/O.
		 */
		au = amap_pop_cold_au_largest(amap, amap->clusters_per_au);

		if (au) {
			MMSG("AMAP: packing (cnt: %d) / hard-limit, largest)\n",
				amap->n_need_packing);

			if (au->free_clusters >= 96) {
				*clu_to_skip = au->free_clusters / 2;
				MMSG("AMAP: cluster idx re-position\n");
			}
			amap->n_need_packing = 0;
			return au;
		}
	}

	/* Update # of clean AU allocation */
	amap->n_need_packing++;
	return NULL;
}
742
743
/* Pick a target AU for the next allocation.
 * - This function should be called only if there are one or more free
 *   clusters in the bdev.
 *
 * Returns &amap->cur_hot or &amap->cur_cold, or NULL when only ignored
 * AUs remain.
 */
TARGET_AU_T *amap_get_target_au(AMAP_T *amap, int dest, int num_to_wb)
{
	int loop_count = 0;

retry:
	/* at most two fall-backs (hot<->cold) before giving up */
	if (++loop_count >= 3) {
		/* No space available (or AMAP consistency error)
		   This could happen because of the ignored AUs
		   but not likely
		   (because the defrag daemon will not work if there is no enough space)
		 */
		BUG_ON(amap->slist_ignored.next == NULL);
		return NULL;
	}

	/* Hot clusters (DIR) */
	if (dest == ALLOC_HOT) {

		/* Working hot AU exist? */
		if (amap->cur_hot.au == NULL || amap->cur_hot.au->free_clusters == 0) {
			AU_INFO_T *au;

			if (amap->total_fclu_hot == 0) {
				/* No more hot AU available: fall back to cold */
				dest = ALLOC_COLD;

				goto retry;
			}

			au = amap_find_hot_au_partial(amap);

			BUG_ON(au == NULL);
			BUG_ON(au->free_clusters <= 0);

			amap->cur_hot.au = au;
			amap->cur_hot.idx = 0;
			amap->cur_hot.clu_to_skip = 0;
		}

		/* Now allocate on a hot AU */
		return &amap->cur_hot;
	}


	/* Cold allocation:
	   If amap->cur_cold.au has one or more free cluster(s),
	   then just return amap->cur_cold
	 */
	if ( (!amap->cur_cold.au) \
		|| (amap->cur_cold.idx == amap->clusters_per_au) \
		|| (amap->cur_cold.au->free_clusters == 0)) {

		AU_INFO_T *au = NULL;
		const AU_INFO_T *old_au = amap->cur_cold.au;
		int n_clu_to_skip = 0;

		if (old_au) {
			ASSERT(!IS_AU_WORKING(old_au, amap));
			// must be NOT WORKING AU. (only for information gathering)
		}

		/* Next target AU is needed:
		   There are 3 possible ALLOC options for cold AU

		   ALLOC_COLD_ALIGNED: Clean AU first, but heuristic packing is ON
		   ALLOC_COLD_PACKING: Packing AU first (usually for defrag)
		   ALLOC_COLD_SEQ    : Sequential AU allocation (VFAT-like)
		 */


		/* Experimental: Modify allocation destination if needed (ALIGNED => SEQ) */
		// dest = amap_update_dest(amap, dest);

		if ((dest == ALLOC_COLD_SEQ) && old_au) {
			/* round-robin scan starting after the previous AU */
			int i_au = old_au->idx + 1;

			while (i_au != old_au->idx) {
				au = GET_AU(amap, i_au);

				if ((au->free_clusters > 0) &&
					!IS_AU_HOT(au, amap) &&
					!IS_AU_IGNORED(au, amap)) {

					MMSG("AMAP: new cold AU(%d) with %d "
						"clusters (seq)\n",
						au->idx, au->free_clusters);

					amap_remove_cold_au(amap, au);
					goto ret_new_cold;
				}
				i_au++;
				if (i_au >= amap->n_au)
					i_au = 0;
			}

			// no cold AUs are available => Hot allocation
			dest = ALLOC_HOT;
			goto retry;
		}


		/*
		 * Check if packing is needed
		 * (ALLOC_COLD_PACKING is treated by this function)
		 */
		au = amap_get_packing_au(amap, dest, num_to_wb, &n_clu_to_skip);
		if (au) {
			MMSG("AMAP: new cold AU(%d) with %d clusters "
				"(packing)\n", au->idx, au->free_clusters);
			goto ret_new_cold;
		}

		/* ALLOC_COLD_ALIGNED */
		/* Check if the adjacent AU is clean */
		if (old_au && ((old_au->idx + 1) < amap->n_au)) {
			au = GET_AU(amap, old_au->idx + 1);

			if ((au->free_clusters == amap->clusters_per_au) &&
				!IS_AU_HOT(au, amap) &&
				!IS_AU_IGNORED(au, amap)) {
				MMSG("AMAP: new cold AU(%d) with %d clusters "
					"(adjacent)\n", au->idx, au->free_clusters);
				amap_remove_cold_au(amap, au);
				goto ret_new_cold;
			}
		}

		/* Clean or largest AU */
		au = amap_pop_cold_au_largest(amap, 0);
		if (!au) {
			/* no cold AU left at all: try hot allocation */
			dest = ALLOC_HOT;
			goto retry;
		}

		MMSG("AMAP: New cold AU (%d) with %d clusters\n", \
			au->idx, au->free_clusters);

ret_new_cold:
		SET_AU_WORKING(au);

		amap->cur_cold.au = au;
		amap->cur_cold.idx = 0;
		amap->cur_cold.clu_to_skip = n_clu_to_skip;
	}

	return &amap->cur_cold;
}
895
/* Put and update target AU.
 *
 * Called after @num_allocated clusters were taken from @cur->au:
 * updates clean/full AU statistics and the per-target cursor state,
 * and returns a finished non-hot AU to the cold map.
 */
void amap_put_target_au(AMAP_T *amap, TARGET_AU_T *cur, int num_allocated)
{
	/* Update AMAP info vars. */
	if (num_allocated > 0 && \
		(cur->au->free_clusters + num_allocated) == amap->clusters_per_au)
		// if the target AU was a clean AU before this allocation ...
		amap->n_clean_au--;
	if (num_allocated > 0 && \
		cur->au->free_clusters == 0)
		// the AU has just become completely full
		amap->n_full_au++;


	if (IS_AU_HOT(cur->au, amap)) {
		/* Hot AU */
		MMSG("AMAP: hot allocation at AU %d\n", cur->au->idx);
		amap->total_fclu_hot -= num_allocated;

		/* Intra-AU round-robin */
		if (cur->idx >= amap->clusters_per_au)
			cur->idx = 0;

		/* No more space available */
		if (cur->au->free_clusters == 0)
			cur->au = NULL;

	} else {
		/* non-hot AU */
		ASSERT(IS_AU_WORKING(cur->au, amap));

		if (cur->idx >= amap->clusters_per_au || cur->au->free_clusters == 0) {
			/* It should be inserted back to AU MAP */
			cur->au->shead.head = NULL;	// SET_AU_NOT_WORKING
			amap_add_cold_au(amap, cur->au);

			// cur->au = NULL; // This value will be used for the next AU selection
			cur->idx = amap->clusters_per_au;	// AU closing
		}
	}

}
937
938
/* Reposition target->idx for packing
   (Heuristics)

   Skip (num_to_skip) free clusters in (cur->au), advancing both the
   intra-AU index and the absolute cluster number together.

   Returns 0 on success, -EIO on misuse or FAT read failure.
 */
static inline int amap_skip_cluster(struct super_block *sb, TARGET_AU_T *cur, int num_to_skip)
{
	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
	u32 clu, read_clu;
	MMSG_VAR(int num_to_skip_orig = num_to_skip);

	/* cannot skip more free clusters than the AU actually has */
	if (num_to_skip >= cur->au->free_clusters) {
		EMSG("AMAP(%s): skip mis-use. amap_566\n", __func__);
		return -EIO;
	}

	clu = CLU_of_i_AU(amap, cur->au->idx, cur->idx);

	while (num_to_skip > 0) {
		if (clu >= CLUS_BASE) {
			/* Cf.
			 * If AMAP's integrity is okay,
			 * we don't need to check if (clu < fsi->num_clusters)
			 */

			if (fat_ent_get(sb, clu, &read_clu))
				return -EIO;

			/* only free clusters count toward the skip budget */
			if (IS_CLUS_FREE(read_clu))
				num_to_skip--;
		}

		// Move clu->idx
		clu++;
		(cur->idx)++;

		if (cur->idx >= amap->clusters_per_au) {
			/* End of AU (Not supposed) */
			EMSG("AMAP: Skip - End of AU?! (amap_596)\n");
			cur->idx = 0;
			return -EIO;
		}
	}

	MMSG("AMAP: Skip_clusters (%d skipped => %d, among %d free clus)\n",\
		num_to_skip_orig, cur->idx, cur->au->free_clusters);

	return 0;
}
988
989
/* AMAP-based allocation function for FAT32.
 *
 * Allocates up to @num_alloc clusters according to strategy @dest,
 * chaining them through the FAT; p_chain->dir receives the first
 * allocated cluster.  Returns the number of clusters allocated
 * (possibly 0), or -EIO on FAT access failure.
 */
s32 amap_fat_alloc_cluster(struct super_block *sb, s32 num_alloc, CHAIN_T *p_chain, int dest)
{
	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
	TARGET_AU_T *cur = NULL;
	AU_INFO_T *target_au = NULL;	/* Allocation target AU */
	u32 last_clu = CLUS_EOF, read_clu;
	s32 new_clu;	// (translated) s32 supports up to 2G clusters
	s32 num_allocated = 0, num_allocated_each = 0;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	BUG_ON(!amap);
	BUG_ON(IS_CLUS_EOF(fsi->used_clusters));

	p_chain->dir = CLUS_EOF;

	if ((fsi->used_clusters + num_alloc) > (fsi->num_clusters - CLUS_BASE)) {
		/* Reserved count management error
		   or called by dir. management function on fully filled disk */
		num_alloc = fsi->num_clusters - fsi->used_clusters - CLUS_BASE;

		if (unlikely(num_alloc < 0)) {
			sdfat_fs_error_ratelimit(sb,
				"AMAP(%s): invalid used clusters(t:%u,u:%u)\n",
				__func__, fsi->num_clusters, fsi->used_clusters);
			return -EIO;
		}

		if (!num_alloc)
			return 0;
	}

	set_sb_dirty(sb);

	// spin_lock(&amap->amap_lock);

retry_alloc:
	/* Allocation strategy implemented */
	cur = amap_get_target_au(amap, dest, fsi->reserved_clusters);
	if (unlikely(!cur)) {
		// There is no available AU (only ignored-AU are left)
		sdfat_msg(sb, KERN_ERR, "AMAP Allocator: no avaialble AU.");
		return 0;
	}

	/* If there are clusters to skip */
	if (cur->clu_to_skip > 0) {
		if (amap_skip_cluster(sb, &amap->cur_cold, cur->clu_to_skip))
			return -EIO;
		cur->clu_to_skip = 0;
	}

	target_au = cur->au;


	/*
	 * cur->au : target AU info pointer
	 * cur->idx : the intra-cluster idx in the AU to start from
	 */

	BUG_ON(!cur->au);
	BUG_ON(!cur->au->free_clusters);
	BUG_ON(cur->idx >= amap->clusters_per_au);

	num_allocated_each = 0;
	new_clu = CLU_of_i_AU(amap, target_au->idx, cur->idx);

	do {
		/* Allocate at the target AU */
		if ((new_clu >= CLUS_BASE) && (new_clu < fsi->num_clusters)) {
			if (fat_ent_get(sb, new_clu, &read_clu))
				// spin_unlock(&amap->amap_lock);
				return -EIO;	// goto err_and_return

			if (IS_CLUS_FREE(read_clu)) {
				BUG_ON(GET_AU(amap, i_AU_of_CLU(amap, new_clu)) != target_au);

				/* Free cluster found */
				if (fat_ent_set(sb, new_clu, CLUS_EOF))
					return -EIO;

				num_allocated_each++;

				/* first cluster starts the chain, later ones are linked */
				if (IS_CLUS_EOF(p_chain->dir))
					p_chain->dir = new_clu;
				else
					if (fat_ent_set(sb, last_clu, new_clu))
						return -EIO;

				last_clu = new_clu;

				/* Update au info */
				target_au->free_clusters--;
			}

		}

		new_clu++;
		(cur->idx)++;

		/* End of the AU */
		if ((cur->idx >= amap->clusters_per_au) || !(target_au->free_clusters))
			break;
	} while (num_allocated_each < num_alloc);


	/* Update strategy info */
	amap_put_target_au(amap, cur, num_allocated_each);


	num_allocated += num_allocated_each;
	fsi->used_clusters += num_allocated_each;
	num_alloc -= num_allocated_each;


	/* AU exhausted before satisfying the request: pick another AU */
	if (num_alloc > 0)
		goto retry_alloc;

	// spin_unlock(&amap->amap_lock);
	return num_allocated;
}
1111
1112
/* Free cluster for FAT32 (not implemented yet).
 * Stub: always returns -ENOTSUPP.
 */
s32 amap_free_cluster(struct super_block *sb, CHAIN_T *p_chain, s32 do_relse)
{
	return -ENOTSUPP;
}
1118
1119
/*
 * This is called by fat_free_cluster()
 * to update AMAP info when cluster @clu has been freed.
 *
 * Returns 0 always.
 */
s32 amap_release_cluster(struct super_block *sb, u32 clu)
{
	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
	AU_INFO_T *au;
	int i_au;

	// spin_lock(&amap->amap_lock);

	/* Update AU info */
	i_au = i_AU_of_CLU(amap, clu);
	BUG_ON(i_au >= amap->n_au);
	au = GET_AU(amap, i_au);
	/* the AU must have had at least one used cluster */
	BUG_ON(au->free_clusters >= amap->clusters_per_au);

	if (IS_AU_HOT(au, amap)) {
		MMSG("AMAP: Hot cluster freed\n");
		au->free_clusters++;
		amap->total_fclu_hot++;
	} else if (!IS_AU_WORKING(au, amap) && !IS_AU_IGNORED(au, amap)) {
		/* Ordinary AU - re-bucket it in the cold map */
		// Can be optimized by implmenting amap_update_au
		amap_remove_cold_au(amap, au);
		au->free_clusters++;
		amap_add_cold_au(amap, au);
	} else
		/* working/ignored AUs are not on the cold map: just count */
		au->free_clusters++;


	/* Update AMAP info */
	if (au->free_clusters == amap->clusters_per_au)
		amap->n_clean_au++;
	/* free_clusters == 1 means the AU was full until this release */
	if (au->free_clusters == 1)
		amap->n_full_au--;

	// spin_unlock(&amap->amap_lock);
	return 0;
}
1161
1162
1163 /*
1164 Check if the cluster is in a working AU
1165 The caller should hold sb lock.
1166 This func. should be used only if smart allocation is on
1167 */
1168 s32 amap_check_working(struct super_block *sb, u32 clu)
1169 {
1170 AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
1171 AU_INFO_T *au;
1172
1173 BUG_ON(!amap);
1174
1175 au = GET_AU(amap, i_AU_of_CLU(amap, clu));
1176
1177 return (IS_AU_WORKING(au, amap));
1178 }
1179
1180
1181 /*
1182 Return the # of free clusters in that AU
1183 */
1184 s32 amap_get_freeclus(struct super_block *sb, u32 clu)
1185 {
1186 AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
1187 AU_INFO_T *au;
1188
1189 BUG_ON(!amap);
1190
1191 au = GET_AU(amap, i_AU_of_CLU(amap, clu));
1192
1193 return ((s32)au->free_clusters);
1194 }
1195
1196
1197 /*
1198 Add the AU containing 'clu' to the ignored AU list.
1199 The AU will not be used by the allocator.
1200
1201 XXX: Ignored counter needed
1202 */
1203 s32 amap_mark_ignore(struct super_block *sb, u32 clu)
1204 {
1205 AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
1206 AU_INFO_T *au;
1207
1208 BUG_ON(!amap);
1209
1210 au = GET_AU(amap, i_AU_of_CLU(amap, clu));
1211
1212
1213 if (IS_AU_HOT(au, amap)) {
1214 // Doesn't work with hot AUs
1215 return -EPERM;
1216 } else if (IS_AU_WORKING(au, amap)) {
1217 return -EBUSY;
1218 }
1219
1220 //BUG_ON(IS_AU_IGNORED(au, amap) && (GET_IGN_CNT(au) == 0));
1221 if (IS_AU_IGNORED(au, amap))
1222 return 0;
1223
1224 amap_remove_cold_au(amap, au);
1225 amap_insert_to_list(au, &amap->slist_ignored);
1226
1227 BUG_ON(!IS_AU_IGNORED(au, amap));
1228
1229 //INC_IGN_CNT(au);
1230
1231 MMSG("AMAP: Mark ignored AU (%d)\n", au->idx);
1232
1233 return 0;
1234 }
1235
1236
1237 /*
1238 This function could be used only on IGNORED AUs.
1239 The caller should care whether it's ignored or not before using this func.
1240 */
1241 s32 amap_unmark_ignore(struct super_block *sb, u32 clu)
1242 {
1243 AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
1244 AU_INFO_T *au;
1245
1246 BUG_ON(!amap);
1247
1248 au = GET_AU(amap, i_AU_of_CLU(amap, clu));
1249
1250 BUG_ON(!IS_AU_IGNORED(au, amap));
1251 // BUG_ON(GET_IGN_CNT(au) == 0);
1252
1253 amap_remove_from_list(au, &amap->slist_ignored);
1254 amap_add_cold_au(amap, au);
1255
1256 BUG_ON(IS_AU_IGNORED(au, amap));
1257
1258 //DEC_IGN_CNT(au);
1259
1260 MMSG("AMAP: Unmark ignored AU (%d)\n", au->idx);
1261
1262 return 0;
1263 }
1264
1265 /*
1266 Unmark all ignored AU
1267 This will return # of unmarked AUs
1268 */
1269 s32 amap_unmark_ignore_all(struct super_block *sb)
1270 {
1271 AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
1272 struct slist_head *entry;
1273 AU_INFO_T *au;
1274 int n = 0;
1275
1276 BUG_ON(!amap);
1277
1278 entry = amap->slist_ignored.next;
1279 while (entry) {
1280 au = list_entry(entry, AU_INFO_T, shead);
1281
1282 BUG_ON(au != GET_AU(amap, au->idx));
1283 BUG_ON(!IS_AU_IGNORED(au, amap));
1284
1285 //CLEAR_IGN_CNT(au);
1286
1287 amap_remove_from_list(au, &amap->slist_ignored);
1288 amap_add_cold_au(amap, au);
1289
1290 MMSG("AMAP: Unmark ignored AU (%d)\n", au->idx);
1291 n++;
1292
1293 entry = amap->slist_ignored.next;
1294 }
1295
1296 BUG_ON(amap->slist_ignored.next != NULL);
1297 MMSG("AMAP: unmark_ignore_all, total %d AUs\n", n);
1298
1299 return n;
1300 }
1301
1302 /**
1303 * @fn amap_get_au_stat
1304 * @brief report AUs status depending on mode
1305 * @return positive on success, 0 otherwise
1306 * @param sbi super block info
1307 * @param mode TOTAL, CLEAN and FULL
1308 */
1309 u32 amap_get_au_stat(struct super_block *sb, s32 mode)
1310 {
1311 AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
1312
1313 if (!amap)
1314 return 0;
1315
1316 if (mode == VOL_AU_STAT_TOTAL)
1317 return amap->n_au;
1318 else if (mode == VOL_AU_STAT_CLEAN)
1319 return amap->n_clean_au;
1320 else if (mode == VOL_AU_STAT_FULL)
1321 return amap->n_full_au;
1322
1323 return 0;
1324 }
1325