/*
 *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version 2
 *  of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 *  MA  02110-1301, USA.
 */
/************************************************************************/
/*                                                                      */
/*  PROJECT : exFAT & FAT12/16/32 File System                           */
/*  PURPOSE : sdFAT Cache Manager                                       */
/*            (FAT Cache & Buffer Cache)                                */
/*                                                                      */
/************************************************************************/
27 /*----------------------------------------------------------------------*/
31 /************************************************************************/
33 #include <linux/swap.h> /* for mark_page_accessed() */
34 #include <asm/unaligned.h>
39 #define DEBUG_HASH_LIST
40 #define DEBUG_HASH_PREV (0xAAAA5555)
41 #define DEBUG_HASH_NEXT (0x5555AAAA)
43 /*----------------------------------------------------------------------*/
44 /* Global Variable Definitions */
45 /*----------------------------------------------------------------------*/
46 /* All buffer structures are protected w/ fsi->v_sem */
48 /*----------------------------------------------------------------------*/
49 /* Local Variable Definitions */
50 /*----------------------------------------------------------------------*/
55 /*----------------------------------------------------------------------*/
56 /* Cache handling function declarations */
57 /*----------------------------------------------------------------------*/
58 static cache_ent_t
*__fcache_find(struct super_block
*sb
, u32 sec
);
59 static cache_ent_t
*__fcache_get(struct super_block
*sb
, u32 sec
);
60 static void __fcache_insert_hash(struct super_block
*sb
, cache_ent_t
*bp
);
61 static void __fcache_remove_hash(cache_ent_t
*bp
);
63 static cache_ent_t
*__dcache_find(struct super_block
*sb
, u32 sec
);
64 static cache_ent_t
*__dcache_get(struct super_block
*sb
, u32 sec
);
65 static void __dcache_insert_hash(struct super_block
*sb
, cache_ent_t
*bp
);
66 static void __dcache_remove_hash(cache_ent_t
*bp
);
68 /*----------------------------------------------------------------------*/
69 /* Static functions */
70 /*----------------------------------------------------------------------*/
71 static void push_to_mru(cache_ent_t
*bp
, cache_ent_t
*list
)
73 bp
->next
= list
->next
;
75 list
->next
->prev
= bp
;
77 } /* end of __dcache_push_to_mru */
79 static void push_to_lru(cache_ent_t
*bp
, cache_ent_t
*list
)
81 bp
->prev
= list
->prev
;
83 list
->prev
->next
= bp
;
85 } /* end of __dcache_push_to_lru */
87 static void move_to_mru(cache_ent_t
*bp
, cache_ent_t
*list
)
89 bp
->prev
->next
= bp
->next
;
90 bp
->next
->prev
= bp
->prev
;
91 push_to_mru(bp
, list
);
92 } /* end of __dcache_move_to_mru */
94 static void move_to_lru(cache_ent_t
*bp
, cache_ent_t
*list
)
96 bp
->prev
->next
= bp
->next
;
97 bp
->next
->prev
= bp
->prev
;
98 push_to_lru(bp
, list
);
99 } /* end of __dcache_move_to_lru */
101 static inline s32
__check_hash_valid(cache_ent_t
*bp
)
103 #ifdef DEBUG_HASH_LIST
104 if ( (bp
->hash
.next
== (cache_ent_t
*)DEBUG_HASH_NEXT
) ||
105 (bp
->hash
.prev
== (cache_ent_t
*)DEBUG_HASH_PREV
) ) {
109 if ( (bp
->hash
.next
== bp
) || (bp
->hash
.prev
== bp
) )
115 static inline void __remove_from_hash(cache_ent_t
*bp
)
117 (bp
->hash
.prev
)->hash
.next
= bp
->hash
.next
;
118 (bp
->hash
.next
)->hash
.prev
= bp
->hash
.prev
;
121 #ifdef DEBUG_HASH_LIST
122 bp
->hash
.next
= (cache_ent_t
*)DEBUG_HASH_NEXT
;
123 bp
->hash
.prev
= (cache_ent_t
*)DEBUG_HASH_PREV
;
127 /* Do FAT mirroring (don't sync)
128 sec: sector No. in FAT1
131 static inline s32
__fat_copy(struct super_block
*sb
, u32 sec
, struct buffer_head
*bh
, int sync
)
133 #ifdef CONFIG_SDFAT_FAT_MIRRORING
134 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
137 if (fsi
->FAT2_start_sector
!= fsi
->FAT1_start_sector
) {
138 sec2
= sec
- fsi
->FAT1_start_sector
+ fsi
->FAT2_start_sector
;
139 BUG_ON(sec2
!= (sec
+ fsi
->num_FAT_sectors
));
141 MMSG("BD: fat mirroring (%d in FAT1, %d in FAT2)\n", sec
, sec2
);
142 if (write_sect(sb
, sec2
, bh
, sync
))
149 } /* end of __fat_copy */
152 * returns 1, if bp is flushed
153 * returns 0, if bp is not dirty
154 * returns -1, if error occurs
156 static s32
__fcache_ent_flush(struct super_block
*sb
, cache_ent_t
*bp
, u32 sync
)
158 if (!(bp
->flag
& DIRTYBIT
))
160 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
161 // Make buffer dirty (XXX: Naive impl.)
162 if (write_sect(sb
, bp
->sec
, bp
->bh
, 0))
165 if (__fat_copy(sb
, bp
->sec
, bp
->bh
, 0))
168 bp
->flag
&= ~(DIRTYBIT
);
171 sync_dirty_buffer(bp
->bh
);
176 static s32
__fcache_ent_discard(struct super_block
*sb
, cache_ent_t
*bp
)
178 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
179 __fcache_remove_hash(bp
);
187 move_to_lru(bp
, &fsi
->fcache
.lru_list
);
191 u8
*fcache_getblk(struct super_block
*sb
, u32 sec
)
194 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
195 u32 page_ra_count
= FCACHE_MAX_RA_SIZE
>> sb
->s_blocksize_bits
;
197 bp
= __fcache_find(sb
, sec
);
199 if (bdev_check_bdi_valid(sb
)) {
200 __fcache_ent_flush(sb
, bp
, 0);
201 __fcache_ent_discard(sb
, bp
);
204 move_to_mru(bp
, &fsi
->fcache
.lru_list
);
205 return(bp
->bh
->b_data
);
208 bp
= __fcache_get(sb
, sec
);
210 if (!__check_hash_valid(bp
))
211 __fcache_remove_hash(bp
);
215 __fcache_insert_hash(sb
, bp
);
217 /* Naive FAT read-ahead (increase I/O unit to page_ra_count) */
218 if ((sec
& (page_ra_count
- 1)) == 0)
219 bdev_readahead(sb
, sec
, page_ra_count
);
222 * patch 1.2.4 : buffer_head null pointer exception problem.
224 * When read_sect is failed, fcache should be moved to
225 * EMPTY hash_list and the first of lru_list.
227 if (read_sect(sb
, sec
, &(bp
->bh
), 1)) {
228 __fcache_ent_discard(sb
, bp
);
232 return bp
->bh
->b_data
;
235 static inline int __mark_delayed_dirty(struct super_block
*sb
, cache_ent_t
*bp
)
237 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
238 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
239 if (fsi
->vol_type
== EXFAT
)
242 bp
->flag
|= DIRTYBIT
;
251 s32
fcache_modify(struct super_block
*sb
, u32 sec
)
255 bp
= __fcache_find(sb
, sec
);
259 if (!__mark_delayed_dirty(sb
, bp
))
262 if (write_sect(sb
, sec
, bp
->bh
, 0))
265 if (__fat_copy(sb
, sec
, bp
->bh
, 0))
271 /*======================================================================*/
272 /* Cache Initialization Functions */
273 /*======================================================================*/
274 s32
meta_cache_init(struct super_block
*sb
)
276 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
280 fsi
->fcache
.lru_list
.next
= &fsi
->fcache
.lru_list
;
281 fsi
->fcache
.lru_list
.prev
= fsi
->fcache
.lru_list
.next
;
283 for (i
= 0; i
< FAT_CACHE_SIZE
; i
++) {
284 fsi
->fcache
.pool
[i
].sec
= ~0;
285 fsi
->fcache
.pool
[i
].flag
= 0;
286 fsi
->fcache
.pool
[i
].bh
= NULL
;
287 fsi
->fcache
.pool
[i
].prev
= NULL
;
288 fsi
->fcache
.pool
[i
].next
= NULL
;
289 push_to_mru(&(fsi
->fcache
.pool
[i
]), &fsi
->fcache
.lru_list
);
292 fsi
->dcache
.lru_list
.next
= &fsi
->dcache
.lru_list
;
293 fsi
->dcache
.lru_list
.prev
= fsi
->dcache
.lru_list
.next
;
294 fsi
->dcache
.keep_list
.next
= &fsi
->dcache
.keep_list
;
295 fsi
->dcache
.keep_list
.prev
= fsi
->dcache
.keep_list
.next
;
297 // Initially, all the BUF_CACHEs are in the LRU list
298 for (i
= 0; i
< BUF_CACHE_SIZE
; i
++) {
299 fsi
->dcache
.pool
[i
].sec
= ~0;
300 fsi
->dcache
.pool
[i
].flag
= 0;
301 fsi
->dcache
.pool
[i
].bh
= NULL
;
302 fsi
->dcache
.pool
[i
].prev
= NULL
;
303 fsi
->dcache
.pool
[i
].next
= NULL
;
304 push_to_mru(&(fsi
->dcache
.pool
[i
]), &fsi
->dcache
.lru_list
);
308 for (i
= 0; i
< FAT_CACHE_HASH_SIZE
; i
++) {
309 fsi
->fcache
.hash_list
[i
].sec
= ~0;
310 fsi
->fcache
.hash_list
[i
].hash
.next
= &(fsi
->fcache
.hash_list
[i
]);
312 fsi
->fcache
.hash_list
[i
].hash
.prev
= fsi
->fcache
.hash_list
[i
].hash
.next
;
315 for (i
= 0; i
< FAT_CACHE_SIZE
; i
++)
316 __fcache_insert_hash(sb
, &(fsi
->fcache
.pool
[i
]));
318 for (i
= 0; i
< BUF_CACHE_HASH_SIZE
; i
++) {
319 fsi
->dcache
.hash_list
[i
].sec
= ~0;
320 fsi
->dcache
.hash_list
[i
].hash
.next
= &(fsi
->dcache
.hash_list
[i
]);
322 fsi
->dcache
.hash_list
[i
].hash
.prev
= fsi
->dcache
.hash_list
[i
].hash
.next
;
325 for (i
= 0; i
< BUF_CACHE_SIZE
; i
++)
326 __dcache_insert_hash(sb
, &(fsi
->dcache
.pool
[i
]));
331 s32
meta_cache_shutdown(struct super_block
*sb
)
336 /*======================================================================*/
337 /* FAT Read/Write Functions */
338 /*======================================================================*/
339 s32
fcache_release_all(struct super_block
*sb
)
343 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
346 bp
= fsi
->fcache
.lru_list
.next
;
347 while (bp
!= &fsi
->fcache
.lru_list
) {
348 s32 ret_tmp
= __fcache_ent_flush(sb
, bp
, 0);
364 DMSG("BD:Release / dirty fat cache: %d (err:%d)\n", dirtycnt
, ret
);
369 /* internal DIRTYBIT marked => bh dirty */
370 s32
fcache_flush(struct super_block
*sb
, u32 sync
)
374 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
377 bp
= fsi
->fcache
.lru_list
.next
;
378 while (bp
!= &fsi
->fcache
.lru_list
) {
379 ret
= __fcache_ent_flush(sb
, bp
, sync
);
387 MMSG("BD: flush / dirty fat cache: %d (err:%d)\n", dirtycnt
, ret
);
391 static cache_ent_t
*__fcache_find(struct super_block
*sb
, u32 sec
)
394 cache_ent_t
*bp
, *hp
;
395 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
397 off
= (sec
+ (sec
>> fsi
->sect_per_clus_bits
)) & (FAT_CACHE_HASH_SIZE
- 1);
399 hp
= &(fsi
->fcache
.hash_list
[off
]);
400 for (bp
= hp
->hash
.next
; bp
!= hp
; bp
= bp
->hash
.next
) {
401 if (bp
->sec
== sec
) {
404 * patch 1.2.4 : for debugging
406 WARN(!bp
->bh
, "[SDFAT] fcache has no bh. "
407 "It will make system panic.\n");
409 touch_buffer(bp
->bh
);
414 } /* end of __fcache_find */
416 static cache_ent_t
*__fcache_get(struct super_block
*sb
, u32 sec
)
419 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
421 bp
= fsi
->fcache
.lru_list
.prev
;
423 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
424 while (bp
->flag
& DIRTYBIT
) {
425 cache_ent_t
*bp_prev
= bp
->prev
;
428 if (bp
== &fsi
->fcache
.lru_list
) {
429 DMSG("BD: fat cache flooding\n");
430 fcache_flush(sb
, 0); // flush all dirty FAT caches
431 bp
= fsi
->fcache
.lru_list
.prev
;
435 // if (bp->flag & DIRTYBIT)
436 // sync_dirty_buffer(bp->bh);
438 move_to_mru(bp
, &fsi
->fcache
.lru_list
);
440 } /* end of __fcache_get */
442 static void __fcache_insert_hash(struct super_block
*sb
, cache_ent_t
*bp
)
448 fsi
= &(SDFAT_SB(sb
)->fsi
);
449 off
= (bp
->sec
+ (bp
->sec
>> fsi
->sect_per_clus_bits
)) & (FAT_CACHE_HASH_SIZE
-1);
451 hp
= &(fsi
->fcache
.hash_list
[off
]);
452 bp
->hash
.next
= hp
->hash
.next
;
454 hp
->hash
.next
->hash
.prev
= bp
;
456 } /* end of __fcache_insert_hash */
459 static void __fcache_remove_hash(cache_ent_t
*bp
)
461 #ifdef DEBUG_HASH_LIST
462 if ( (bp
->hash
.next
== (cache_ent_t
*)DEBUG_HASH_NEXT
) ||
463 (bp
->hash
.prev
== (cache_ent_t
*)DEBUG_HASH_PREV
) ) {
464 EMSG("%s: FATAL: tried to remove already-removed-cache-entry"
465 "(bp:%p)\n", __func__
, bp
);
469 WARN_ON(bp
->flag
& DIRTYBIT
);
470 __remove_from_hash(bp
);
471 } /* end of __fcache_remove_hash */
473 /*======================================================================*/
474 /* Buffer Read/Write Functions */
475 /*======================================================================*/
476 /* Read-ahead a cluster */
477 s32
dcache_readahead(struct super_block
*sb
, u32 sec
)
479 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
480 struct buffer_head
*bh
;
481 u32 max_ra_count
= DCACHE_MAX_RA_SIZE
>> sb
->s_blocksize_bits
;
482 u32 page_ra_count
= PAGE_SIZE
>> sb
->s_blocksize_bits
;
483 u32 adj_ra_count
= max(fsi
->sect_per_clus
, page_ra_count
);
484 u32 ra_count
= min(adj_ra_count
, max_ra_count
);
486 /* Read-ahead is not required */
487 if (fsi
->sect_per_clus
== 1)
490 if (sec
< fsi
->data_start_sector
) {
491 EMSG("BD: %s: requested sector is invalid(sect:%u, root:%u)\n",
492 __func__
, sec
, fsi
->data_start_sector
);
496 /* Not sector aligned with ra_count, resize ra_count to page size */
497 if ((sec
- fsi
->data_start_sector
) & (ra_count
- 1))
498 ra_count
= page_ra_count
;
500 bh
= sb_find_get_block(sb
, sec
);
501 if (!bh
|| !buffer_uptodate(bh
))
502 bdev_readahead(sb
, sec
, ra_count
);
510 * returns 1, if bp is flushed
511 * returns 0, if bp is not dirty
512 * returns -1, if error occurs
514 static s32
__dcache_ent_flush(struct super_block
*sb
, cache_ent_t
*bp
, u32 sync
)
516 if (!(bp
->flag
& DIRTYBIT
))
518 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
519 // Make buffer dirty (XXX: Naive impl.)
520 if (write_sect(sb
, bp
->sec
, bp
->bh
, 0))
523 bp
->flag
&= ~(DIRTYBIT
);
526 sync_dirty_buffer(bp
->bh
);
531 static s32
__dcache_ent_discard(struct super_block
*sb
, cache_ent_t
*bp
)
533 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
535 MMSG("%s : bp[%p] (sec:%08x flag:%08x bh:%p) list(prev:%p next:%p) "
536 "hash(prev:%p next:%p)\n", __func__
,
537 bp
, bp
->sec
, bp
->flag
, bp
->bh
, bp
->prev
, bp
->next
,
538 bp
->hash
.prev
, bp
->hash
.next
);
540 __dcache_remove_hash(bp
);
549 move_to_lru(bp
, &fsi
->dcache
.lru_list
);
553 u8
*dcache_getblk(struct super_block
*sb
, u32 sec
)
556 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
558 bp
= __dcache_find(sb
, sec
);
560 if (bdev_check_bdi_valid(sb
)) {
561 MMSG("%s: found cache(%p, sect:%u). But invalid BDI\n"
562 , __func__
, bp
, sec
);
563 __dcache_ent_flush(sb
, bp
, 0);
564 __dcache_ent_discard(sb
, bp
);
568 if (!(bp
->flag
& KEEPBIT
)) // already in keep list
569 move_to_mru(bp
, &fsi
->dcache
.lru_list
);
571 return(bp
->bh
->b_data
);
574 bp
= __dcache_get(sb
, sec
);
576 if (!__check_hash_valid(bp
))
577 __dcache_remove_hash(bp
);
581 __dcache_insert_hash(sb
, bp
);
583 if (read_sect(sb
, sec
, &(bp
->bh
), 1)) {
584 __dcache_ent_discard(sb
, bp
);
588 return bp
->bh
->b_data
;
592 s32
dcache_modify(struct super_block
*sb
, u32 sec
)
599 bp
= __dcache_find(sb
, sec
);
601 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
602 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
603 if (fsi
->vol_type
!= EXFAT
) {
604 bp
->flag
|= DIRTYBIT
;
608 ret
= write_sect(sb
, sec
, bp
->bh
, 0);
612 DMSG("%s : failed to modify buffer(err:%d, sec:%u, bp:0x%p)\n",
613 __func__
, ret
, sec
, bp
);
619 s32
dcache_lock(struct super_block
*sb
, u32 sec
)
623 bp
= __dcache_find(sb
, sec
);
629 EMSG("%s : failed to lock buffer(sec:%u, bp:0x%p)\n", __func__
, sec
, bp
);
633 s32
dcache_unlock(struct super_block
*sb
, u32 sec
)
637 bp
= __dcache_find(sb
, sec
);
639 bp
->flag
&= ~(LOCKBIT
);
643 EMSG("%s : failed to unlock buffer (sec:%u, bp:0x%p)\n", __func__
, sec
, bp
);
647 s32
dcache_release(struct super_block
*sb
, u32 sec
)
650 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
652 bp
= __dcache_find(sb
, sec
);
656 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
657 if (bp
->flag
& DIRTYBIT
) {
658 if (write_sect(sb
, bp
->sec
, bp
->bh
, 0))
670 move_to_lru(bp
, &fsi
->dcache
.lru_list
);
674 s32
dcache_release_all(struct super_block
*sb
)
678 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
681 /* Connect list elements */
682 /* LRU list : (A - B - ... - bp_front) + (bp_first + ... + bp_last) */
683 while (fsi
->dcache
.keep_list
.prev
!= &fsi
->dcache
.keep_list
){
684 cache_ent_t
*bp_keep
= fsi
->dcache
.keep_list
.prev
;
685 // bp_keep->flag &= ~(KEEPBIT); // Will be 0-ed later
686 move_to_mru(bp_keep
, &fsi
->dcache
.lru_list
);
689 bp
= fsi
->dcache
.lru_list
.next
;
690 while (bp
!= &fsi
->dcache
.lru_list
) {
691 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
692 if (bp
->flag
& DIRTYBIT
) {
694 if (write_sect(sb
, bp
->sec
, bp
->bh
, 0))
709 DMSG("BD:Release / dirty buf cache: %d (err:%d)", dirtycnt
, ret
);
714 s32
dcache_flush(struct super_block
*sb
, u32 sync
)
718 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
722 /* Connect list elements */
723 /* LRU list : (A - B - ... - bp_front) + (bp_first + ... + bp_last) */
725 while (fsi
->dcache
.keep_list
.prev
!= &fsi
->dcache
.keep_list
){
726 cache_ent_t
*bp_keep
= fsi
->dcache
.keep_list
.prev
;
727 bp_keep
->flag
&= ~(KEEPBIT
); // Will be 0-ed later
728 move_to_mru(bp_keep
, &fsi
->dcache
.lru_list
);
733 bp
= fsi
->dcache
.lru_list
.next
;
734 while (bp
!= &fsi
->dcache
.lru_list
) {
735 if (bp
->flag
& DIRTYBIT
) {
736 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
737 // Make buffer dirty (XXX: Naive impl.)
738 if (write_sect(sb
, bp
->sec
, bp
->bh
, 0)) {
744 bp
->flag
&= ~(DIRTYBIT
);
748 sync_dirty_buffer(bp
->bh
);
753 MMSG("BD: flush / dirty dentry cache: %d (%d from keeplist, err:%d)\n",
754 dirtycnt
, keepcnt
, ret
);
758 static cache_ent_t
*__dcache_find(struct super_block
*sb
, u32 sec
)
761 cache_ent_t
*bp
, *hp
;
762 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
764 off
= (sec
+ (sec
>> fsi
->sect_per_clus_bits
)) & (BUF_CACHE_HASH_SIZE
- 1);
766 hp
= &(fsi
->dcache
.hash_list
[off
]);
767 for (bp
= hp
->hash
.next
; bp
!= hp
; bp
= bp
->hash
.next
) {
768 if (bp
->sec
== sec
) {
769 touch_buffer(bp
->bh
);
774 } /* end of __dcache_find */
776 static cache_ent_t
*__dcache_get(struct super_block
*sb
, u32 sec
)
779 FS_INFO_T
*fsi
= &(SDFAT_SB(sb
)->fsi
);
781 bp
= fsi
->dcache
.lru_list
.prev
;
782 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
783 while (bp
->flag
& (DIRTYBIT
| LOCKBIT
)) {
784 cache_ent_t
*bp_prev
= bp
->prev
; // hold prev
786 if (bp
->flag
& DIRTYBIT
) {
787 MMSG("BD: Buf cache => Keep list\n");
789 move_to_mru(bp
, &fsi
->dcache
.keep_list
);
794 /* If all dcaches are dirty */
795 if (bp
== &fsi
->dcache
.lru_list
) {
796 DMSG("BD: buf cache flooding\n");
798 bp
= fsi
->dcache
.lru_list
.prev
;
802 while (bp
->flag
& LOCKBIT
)
805 // if (bp->flag & DIRTYBIT)
806 // sync_dirty_buffer(bp->bh);
808 move_to_mru(bp
, &fsi
->dcache
.lru_list
);
810 } /* end of __dcache_get */
812 static void __dcache_insert_hash(struct super_block
*sb
, cache_ent_t
*bp
)
818 fsi
= &(SDFAT_SB(sb
)->fsi
);
819 off
= (bp
->sec
+ (bp
->sec
>> fsi
->sect_per_clus_bits
)) & (BUF_CACHE_HASH_SIZE
-1);
821 hp
= &(fsi
->dcache
.hash_list
[off
]);
822 bp
->hash
.next
= hp
->hash
.next
;
824 hp
->hash
.next
->hash
.prev
= bp
;
826 } /* end of __dcache_insert_hash */
828 static void __dcache_remove_hash(cache_ent_t
*bp
)
830 #ifdef DEBUG_HASH_LIST
831 if ( (bp
->hash
.next
== (cache_ent_t
*)DEBUG_HASH_NEXT
) ||
832 (bp
->hash
.prev
== (cache_ent_t
*)DEBUG_HASH_PREV
) ) {
833 EMSG("%s: FATAL: tried to remove already-removed-cache-entry"
834 "(bp:%p)\n", __func__
, bp
);
838 WARN_ON(bp
->flag
& DIRTYBIT
);
839 __remove_from_hash(bp
);
840 } /* end of __dcache_remove_hash */