V4L/DVB (13571): v4l: Adding Digital Video Timings APIs
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] / fs / sdfat / cache.c
1 /*
2 * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
17 * MA 02110-1301, USA.
18 */
19
20 /************************************************************************/
21 /* */
22 /* PROJECT : exFAT & FAT12/16/32 File System */
23 /* FILE : cache.c */
24 /* PURPOSE : sdFAT Cache Manager */
25 /* (FAT Cache & Buffer Cache) */
26 /* */
27 /*----------------------------------------------------------------------*/
28 /* NOTES */
29 /* */
30 /* */
31 /************************************************************************/
32
33 #include <linux/swap.h> /* for mark_page_accessed() */
34 #include <asm/unaligned.h>
35
36 #include "sdfat.h"
37 #include "core.h"
38
39 #define DEBUG_HASH_LIST
40 #define DEBUG_HASH_PREV (0xAAAA5555)
41 #define DEBUG_HASH_NEXT (0x5555AAAA)
42
43 /*----------------------------------------------------------------------*/
44 /* Global Variable Definitions */
45 /*----------------------------------------------------------------------*/
46 /* All buffer structures are protected w/ fsi->v_sem */
47
48 /*----------------------------------------------------------------------*/
49 /* Local Variable Definitions */
50 /*----------------------------------------------------------------------*/
51 #define LOCKBIT 0x01
52 #define DIRTYBIT 0x02
53 #define KEEPBIT 0x04
54
55 /*----------------------------------------------------------------------*/
56 /* Cache handling function declarations */
57 /*----------------------------------------------------------------------*/
58 static cache_ent_t *__fcache_find(struct super_block *sb, u32 sec);
59 static cache_ent_t *__fcache_get(struct super_block *sb, u32 sec);
60 static void __fcache_insert_hash(struct super_block *sb, cache_ent_t *bp);
61 static void __fcache_remove_hash(cache_ent_t *bp);
62
63 static cache_ent_t *__dcache_find(struct super_block *sb, u32 sec);
64 static cache_ent_t *__dcache_get(struct super_block *sb, u32 sec);
65 static void __dcache_insert_hash(struct super_block *sb, cache_ent_t *bp);
66 static void __dcache_remove_hash(cache_ent_t *bp);
67
68 /*----------------------------------------------------------------------*/
69 /* Static functions */
70 /*----------------------------------------------------------------------*/
71 static void push_to_mru(cache_ent_t *bp, cache_ent_t *list)
72 {
73 bp->next = list->next;
74 bp->prev = list;
75 list->next->prev = bp;
76 list->next = bp;
77 } /* end of __dcache_push_to_mru */
78
79 static void push_to_lru(cache_ent_t *bp, cache_ent_t *list)
80 {
81 bp->prev = list->prev;
82 bp->next = list;
83 list->prev->next = bp;
84 list->prev = bp;
85 } /* end of __dcache_push_to_lru */
86
87 static void move_to_mru(cache_ent_t *bp, cache_ent_t *list)
88 {
89 bp->prev->next = bp->next;
90 bp->next->prev = bp->prev;
91 push_to_mru(bp, list);
92 } /* end of __dcache_move_to_mru */
93
94 static void move_to_lru(cache_ent_t *bp, cache_ent_t *list)
95 {
96 bp->prev->next = bp->next;
97 bp->next->prev = bp->prev;
98 push_to_lru(bp, list);
99 } /* end of __dcache_move_to_lru */
100
101 static inline s32 __check_hash_valid(cache_ent_t *bp)
102 {
103 #ifdef DEBUG_HASH_LIST
104 if ( (bp->hash.next == (cache_ent_t*)DEBUG_HASH_NEXT) ||
105 (bp->hash.prev == (cache_ent_t*)DEBUG_HASH_PREV) ) {
106 return -EINVAL;
107 }
108 #endif
109 if ( (bp->hash.next == bp) || (bp->hash.prev == bp) )
110 return -EINVAL;
111
112 return 0;
113 }
114
115 static inline void __remove_from_hash(cache_ent_t *bp)
116 {
117 (bp->hash.prev)->hash.next = bp->hash.next;
118 (bp->hash.next)->hash.prev = bp->hash.prev;
119 bp->hash.next = bp;
120 bp->hash.prev = bp;
121 #ifdef DEBUG_HASH_LIST
122 bp->hash.next = (cache_ent_t*)DEBUG_HASH_NEXT;
123 bp->hash.prev = (cache_ent_t*)DEBUG_HASH_PREV;
124 #endif
125 }
126
127 /* Do FAT mirroring (don't sync)
128 sec: sector No. in FAT1
129 bh: bh of sec.
130 */
131 static inline s32 __fat_copy(struct super_block *sb, u32 sec, struct buffer_head *bh, int sync)
132 {
133 #ifdef CONFIG_SDFAT_FAT_MIRRORING
134 FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
135 int sec2;
136
137 if (fsi->FAT2_start_sector != fsi->FAT1_start_sector) {
138 sec2 = sec - fsi->FAT1_start_sector + fsi->FAT2_start_sector;
139 BUG_ON(sec2 != (sec + fsi->num_FAT_sectors));
140
141 MMSG("BD: fat mirroring (%d in FAT1, %d in FAT2)\n", sec, sec2);
142 if (write_sect(sb, sec2, bh, sync))
143 return -EIO;
144 }
145 #else
146 /* DO NOTHING */
147 #endif
148 return 0;
149 } /* end of __fat_copy */
150
/*
 * Write back a single dirty FAT cache entry.
 * returns 1, if bp was dirty and has been written back
 * returns 0, if bp is not dirty (nothing to do)
 * returns -EIO, if the write or the FAT mirror copy fails
 */
static s32 __fcache_ent_flush(struct super_block *sb, cache_ent_t *bp, u32 sync)
{
	if (!(bp->flag & DIRTYBIT))
		return 0;
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	// Make buffer dirty (XXX: Naive impl.)
	if (write_sect(sb, bp->sec, bp->bh, 0))
		return -EIO;

	if (__fat_copy(sb, bp->sec, bp->bh, 0))
		return -EIO;
#endif
	bp->flag &= ~(DIRTYBIT);

	/* caller may request an immediate synchronous write of the bh */
	if (sync)
		sync_dirty_buffer(bp->bh);

	return 1;
}
175
176 static s32 __fcache_ent_discard(struct super_block *sb, cache_ent_t *bp)
177 {
178 FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
179 __fcache_remove_hash(bp);
180 bp->sec = ~0;
181 bp->flag = 0;
182
183 if(bp->bh) {
184 __brelse(bp->bh);
185 bp->bh = NULL;
186 }
187 move_to_lru(bp, &fsi->fcache.lru_list);
188 return 0;
189 }
190
/*
 * Return a pointer to the cached contents of FAT sector 'sec',
 * reading it from disk (with naive read-ahead) on a cache miss.
 * Returns NULL when the backing device is gone or the read fails.
 * All cache structures are protected by fsi->v_sem (see top of file);
 * caller is expected to hold it.
 */
u8 *fcache_getblk(struct super_block *sb, u32 sec)
{
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	u32 page_ra_count = FCACHE_MAX_RA_SIZE >> sb->s_blocksize_bits;

	bp = __fcache_find(sb, sec);
	if (bp) {
		/* cache hit: if the bdi became invalid, drop the entry */
		if (bdev_check_bdi_valid(sb)) {
			__fcache_ent_flush(sb, bp, 0);
			__fcache_ent_discard(sb, bp);
			return NULL;
		}
		move_to_mru(bp, &fsi->fcache.lru_list);
		return(bp->bh->b_data);
	}

	/* cache miss: recycle the coldest recyclable entry */
	bp = __fcache_get(sb, sec);

	/* unhash the recycled entry before rebinding it to 'sec' */
	if (!__check_hash_valid(bp))
		__fcache_remove_hash(bp);

	bp->sec = sec;
	bp->flag = 0;
	__fcache_insert_hash(sb, bp);

	/* Naive FAT read-ahead (increase I/O unit to page_ra_count) */
	if ((sec & (page_ra_count - 1)) == 0)
		bdev_readahead(sb, sec, page_ra_count);

	/*
	 * patch 1.2.4 : buffer_head null pointer exception problem.
	 *
	 * When read_sect is failed, fcache should be moved to
	 * EMPTY hash_list and the first of lru_list.
	 */
	if (read_sect(sb, sec, &(bp->bh), 1)) {
		__fcache_ent_discard(sb, bp);
		return NULL;
	}

	return bp->bh->b_data;
}
234
/*
 * Mark a FAT cache entry dirty for delayed write-back.
 * Returns 0 on success; -ENOTSUPP when delayed dirtying is compiled
 * out or the volume is exFAT — the caller then writes through instead.
 */
static inline int __mark_delayed_dirty(struct super_block *sb, cache_ent_t *bp)
{
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	/* delayed dirtying is only used for FAT12/16/32 volumes here */
	if (fsi->vol_type == EXFAT)
		return -ENOTSUPP;

	bp->flag |= DIRTYBIT;
	return 0;
#else
	return -ENOTSUPP;
#endif
}
248
249
250
/*
 * Propagate a modification made through the cached buffer of FAT
 * sector 'sec': either mark the entry for delayed write-back, or
 * write it (and its FAT2 mirror) out immediately.
 * Returns 0 on success, -EIO when the sector is not cached or I/O fails.
 */
s32 fcache_modify(struct super_block *sb, u32 sec)
{
	cache_ent_t *bp;

	bp = __fcache_find(sb, sec);
	if (!bp)
		return -EIO;

	/* delayed dirtying accepted: actual write happens at flush time */
	if (!__mark_delayed_dirty(sb, bp))
		return 0;

	if (write_sect(sb, sec, bp->bh, 0))
		return -EIO;

	if (__fat_copy(sb, sec, bp->bh, 0))
		return -EIO;

	return 0;
}
270
271 /*======================================================================*/
272 /* Cache Initialization Functions */
273 /*======================================================================*/
274 s32 meta_cache_init(struct super_block *sb)
275 {
276 FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
277 s32 i;
278
279 /* LRU list */
280 fsi->fcache.lru_list.next = &fsi->fcache.lru_list;
281 fsi->fcache.lru_list.prev = fsi->fcache.lru_list.next;
282
283 for (i = 0; i < FAT_CACHE_SIZE; i++) {
284 fsi->fcache.pool[i].sec = ~0;
285 fsi->fcache.pool[i].flag = 0;
286 fsi->fcache.pool[i].bh = NULL;
287 fsi->fcache.pool[i].prev = NULL;
288 fsi->fcache.pool[i].next = NULL;
289 push_to_mru(&(fsi->fcache.pool[i]), &fsi->fcache.lru_list);
290 }
291
292 fsi->dcache.lru_list.next = &fsi->dcache.lru_list;
293 fsi->dcache.lru_list.prev = fsi->dcache.lru_list.next;
294 fsi->dcache.keep_list.next = &fsi->dcache.keep_list;
295 fsi->dcache.keep_list.prev = fsi->dcache.keep_list.next;
296
297 // Initially, all the BUF_CACHEs are in the LRU list
298 for (i = 0; i < BUF_CACHE_SIZE; i++) {
299 fsi->dcache.pool[i].sec = ~0;
300 fsi->dcache.pool[i].flag = 0;
301 fsi->dcache.pool[i].bh = NULL;
302 fsi->dcache.pool[i].prev = NULL;
303 fsi->dcache.pool[i].next = NULL;
304 push_to_mru(&(fsi->dcache.pool[i]), &fsi->dcache.lru_list);
305 }
306
307 /* HASH list */
308 for (i = 0; i < FAT_CACHE_HASH_SIZE; i++) {
309 fsi->fcache.hash_list[i].sec = ~0;
310 fsi->fcache.hash_list[i].hash.next = &(fsi->fcache.hash_list[i]);
311 ;
312 fsi->fcache.hash_list[i].hash.prev = fsi->fcache.hash_list[i].hash.next;
313 }
314
315 for (i = 0; i < FAT_CACHE_SIZE; i++)
316 __fcache_insert_hash(sb, &(fsi->fcache.pool[i]));
317
318 for (i = 0; i < BUF_CACHE_HASH_SIZE; i++) {
319 fsi->dcache.hash_list[i].sec = ~0;
320 fsi->dcache.hash_list[i].hash.next = &(fsi->dcache.hash_list[i]);
321
322 fsi->dcache.hash_list[i].hash.prev = fsi->dcache.hash_list[i].hash.next;
323 }
324
325 for (i = 0; i < BUF_CACHE_SIZE; i++)
326 __dcache_insert_hash(sb, &(fsi->dcache.pool[i]));
327
328 return 0;
329 }
330
/*
 * Counterpart of meta_cache_init(). Nothing to tear down: all cache
 * entries live inside FS_INFO_T, so there is no separate allocation
 * to free here. Always returns 0.
 */
s32 meta_cache_shutdown(struct super_block *sb)
{
	return 0;
}
335
336 /*======================================================================*/
337 /* FAT Read/Write Functions */
338 /*======================================================================*/
/*
 * Flush every dirty FAT cache entry, then release all entries
 * (bh references dropped, sector bindings cleared).
 * Returns the last flush error, or 0 when everything succeeded.
 */
s32 fcache_release_all(struct super_block *sb)
{
	s32 ret = 0;
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	s32 dirtycnt = 0;

	bp = fsi->fcache.lru_list.next;
	while (bp != &fsi->fcache.lru_list) {
		/* __fcache_ent_flush returns 1 if flushed, 0 if clean, <0 on error */
		s32 ret_tmp = __fcache_ent_flush(sb, bp, 0);
		if (ret_tmp < 0)
			ret = ret_tmp;
		else
			dirtycnt += ret_tmp;

		bp->sec = ~0;
		bp->flag = 0;

		if(bp->bh) {
			__brelse(bp->bh);
			bp->bh = NULL;
		}
		bp = bp->next;
	}

	DMSG("BD:Release / dirty fat cache: %d (err:%d)\n", dirtycnt, ret);
	return ret;
}
367
368
/* internal DIRTYBIT marked => bh dirty */
/*
 * Write back every dirty FAT cache entry (synchronously if 'sync').
 * Stops at the first I/O error and returns it; on success the return
 * value is the last entry's flush result (0 or 1), i.e. non-negative.
 */
s32 fcache_flush(struct super_block *sb, u32 sync)
{
	s32 ret = 0;
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	s32 dirtycnt = 0;	/* number of entries actually written back */

	bp = fsi->fcache.lru_list.next;
	while (bp != &fsi->fcache.lru_list) {
		ret = __fcache_ent_flush(sb, bp, sync);
		if (ret < 0)
			break;	/* stop on the first I/O error */

		dirtycnt += ret;
		bp = bp->next;
	}

	MMSG("BD: flush / dirty fat cache: %d (err:%d)\n", dirtycnt, ret);
	return ret;
}
390
/*
 * Look up FAT sector 'sec' in the FAT cache hash table.
 * On a hit the bh is touched (see mark_page_accessed include above)
 * and the entry is returned; NULL on miss.
 */
static cache_ent_t *__fcache_find(struct super_block *sb, u32 sec)
{
	s32 off;
	cache_ent_t *bp, *hp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	/* bucket = (sec + cluster index) masked to table size (power of 2) */
	off = (sec + (sec >> fsi->sect_per_clus_bits)) & (FAT_CACHE_HASH_SIZE - 1);

	hp = &(fsi->fcache.hash_list[off]);
	for (bp = hp->hash.next; bp != hp; bp = bp->hash.next) {
		if (bp->sec == sec) {

			/*
			 * patch 1.2.4 : for debugging
			 */
			WARN(!bp->bh, "[SDFAT] fcache has no bh. "
				"It will make system panic.\n");

			touch_buffer(bp->bh);
			return(bp);
		}
	}
	return(NULL);
} /* end of __fcache_find */
415
/*
 * Pick the least-recently-used FAT cache entry for recycling.
 * With delayed dirtying, dirty entries are skipped; if the whole list
 * turns out dirty, everything is flushed and the scan restarts, so
 * this never fails. ('sec' is unused here; the caller rebinds the
 * entry itself.)
 */
static cache_ent_t *__fcache_get(struct super_block *sb, u32 sec)
{
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	bp = fsi->fcache.lru_list.prev;

#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	while (bp->flag & DIRTYBIT) {
		cache_ent_t *bp_prev = bp->prev;

		bp = bp_prev;
		/* wrapped around to the head: every entry is dirty */
		if (bp == &fsi->fcache.lru_list) {
			DMSG("BD: fat cache flooding\n");
			fcache_flush(sb, 0);	// flush all dirty FAT caches
			bp = fsi->fcache.lru_list.prev;
		}
	}
#endif
	// if (bp->flag & DIRTYBIT)
	//	sync_dirty_buffer(bp->bh);

	move_to_mru(bp, &fsi->fcache.lru_list);
	return(bp);
} /* end of __fcache_get */
441
442 static void __fcache_insert_hash(struct super_block *sb, cache_ent_t *bp)
443 {
444 s32 off;
445 cache_ent_t *hp;
446 FS_INFO_T *fsi;
447
448 fsi = &(SDFAT_SB(sb)->fsi);
449 off = (bp->sec + (bp->sec >> fsi->sect_per_clus_bits)) & (FAT_CACHE_HASH_SIZE-1);
450
451 hp = &(fsi->fcache.hash_list[off]);
452 bp->hash.next = hp->hash.next;
453 bp->hash.prev = hp;
454 hp->hash.next->hash.prev = bp;
455 hp->hash.next = bp;
456 } /* end of __fcache_insert_hash */
457
458
/*
 * Unlink bp from its FAT cache hash chain.
 * With DEBUG_HASH_LIST, a double remove is detected via the poison
 * pointers and refused with an error message.
 */
static void __fcache_remove_hash(cache_ent_t *bp)
{
#ifdef DEBUG_HASH_LIST
	if ( (bp->hash.next == (cache_ent_t*)DEBUG_HASH_NEXT) ||
		(bp->hash.prev == (cache_ent_t*)DEBUG_HASH_PREV) ) {
		EMSG("%s: FATAL: tried to remove already-removed-cache-entry"
			"(bp:%p)\n", __func__, bp);
		return;
	}
#endif
	/* removing a dirty entry would lose its pending write-back */
	WARN_ON(bp->flag & DIRTYBIT);
	__remove_from_hash(bp);
} /* end of __fcache_remove_hash */
472
473 /*======================================================================*/
474 /* Buffer Read/Write Functions */
475 /*======================================================================*/
/* Read-ahead a cluster */
/*
 * Issue read-ahead around data sector 'sec'. The window is
 * sect_per_clus, raised to at least one page and capped at
 * DCACHE_MAX_RA_SIZE; a window-unaligned 'sec' falls back to a
 * page-sized window. Returns 0, or -EIO for a sector below the
 * data area.
 */
s32 dcache_readahead(struct super_block *sb, u32 sec)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	struct buffer_head *bh;
	u32 max_ra_count = DCACHE_MAX_RA_SIZE >> sb->s_blocksize_bits;
	u32 page_ra_count = PAGE_SIZE >> sb->s_blocksize_bits;
	u32 adj_ra_count = max(fsi->sect_per_clus, page_ra_count);
	u32 ra_count = min(adj_ra_count, max_ra_count);

	/* Read-ahead is not required */
	if (fsi->sect_per_clus == 1)
		return 0;

	if (sec < fsi->data_start_sector) {
		EMSG("BD: %s: requested sector is invalid(sect:%u, root:%u)\n",
				__func__, sec, fsi->data_start_sector);
		return -EIO;
	}

	/* Not sector aligned with ra_count, resize ra_count to page size */
	if ((sec - fsi->data_start_sector) & (ra_count - 1))
		ra_count = page_ra_count;

	/* only read ahead when the first block is not already up to date */
	bh = sb_find_get_block(sb, sec);
	if (!bh || !buffer_uptodate(bh))
		bdev_readahead(sb, sec, ra_count);

	/* NOTE(review): bh may be NULL here — relies on brelse(NULL) being a no-op */
	brelse(bh);

	return 0;
}
508
/*
 * Write back a single dirty buffer cache entry.
 * returns 1, if bp was dirty and has been written back
 * returns 0, if bp is not dirty (nothing to do)
 * returns -EIO, if the write fails
 */
static s32 __dcache_ent_flush(struct super_block *sb, cache_ent_t *bp, u32 sync)
{
	if (!(bp->flag & DIRTYBIT))
		return 0;
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	// Make buffer dirty (XXX: Naive impl.)
	if (write_sect(sb, bp->sec, bp->bh, 0))
		return -EIO;
#endif
	bp->flag &= ~(DIRTYBIT);

	/* caller may request an immediate synchronous write of the bh */
	if (sync)
		sync_dirty_buffer(bp->bh);

	return 1;
}
530
531 static s32 __dcache_ent_discard(struct super_block *sb, cache_ent_t *bp)
532 {
533 FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
534
535 MMSG("%s : bp[%p] (sec:%08x flag:%08x bh:%p) list(prev:%p next:%p) "
536 "hash(prev:%p next:%p)\n", __func__,
537 bp, bp->sec, bp->flag, bp->bh, bp->prev, bp->next,
538 bp->hash.prev, bp->hash.next);
539
540 __dcache_remove_hash(bp);
541 bp->sec = ~0;
542 bp->flag = 0;
543
544 if(bp->bh) {
545 __brelse(bp->bh);
546 bp->bh = NULL;
547 }
548
549 move_to_lru(bp, &fsi->dcache.lru_list);
550 return 0;
551 }
552
/*
 * Return a pointer to the cached contents of data sector 'sec',
 * reading it from disk on a cache miss. Returns NULL when the backing
 * device is gone or the read fails.
 */
u8 *dcache_getblk(struct super_block *sb, u32 sec)
{
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	bp = __dcache_find(sb, sec);
	if (bp) {
		/* cache hit: if the bdi became invalid, drop the entry */
		if (bdev_check_bdi_valid(sb)) {
			MMSG("%s: found cache(%p, sect:%u). But invalid BDI\n"
				, __func__, bp, sec);
			__dcache_ent_flush(sb, bp, 0);
			__dcache_ent_discard(sb, bp);
			return NULL;
		}

		if (!(bp->flag & KEEPBIT))	// already in keep list
			move_to_mru(bp, &fsi->dcache.lru_list);

		return(bp->bh->b_data);
	}

	/* cache miss: recycle an entry and rebind it to 'sec' */
	bp = __dcache_get(sb, sec);

	if (!__check_hash_valid(bp))
		__dcache_remove_hash(bp);

	bp->sec = sec;
	bp->flag = 0;
	__dcache_insert_hash(sb, bp);

	/* on read failure the entry is returned to the free pool */
	if (read_sect(sb, sec, &(bp->bh), 1)) {
		__dcache_ent_discard(sb, bp);
		return NULL;
	}

	return bp->bh->b_data;

}
591
/*
 * Propagate a modification of cached data sector 'sec': mark the
 * entry dirty (non-exFAT with delayed dirtying compiled in) or write
 * it through immediately. Marks the superblock dirty in all cases.
 * Returns 0 on success, -EIO when the sector is not cached or the
 * write fails.
 */
s32 dcache_modify(struct super_block *sb, u32 sec)
{
	s32 ret = -EIO;
	cache_ent_t *bp;

	set_sb_dirty(sb);

	bp = __dcache_find(sb, sec);
	if (likely(bp)) {
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
		FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
		if (fsi->vol_type != EXFAT) {
			bp->flag |= DIRTYBIT;
			return 0;
		}
#endif
		ret = write_sect(sb, sec, bp->bh, 0);
	}

	if (ret) {
		/* also reached when the sector was not cached (bp == NULL) */
		DMSG("%s : failed to modify buffer(err:%d, sec:%u, bp:0x%p)\n",
			__func__, ret, sec, bp);
	}

	return ret;
}
618
619 s32 dcache_lock(struct super_block *sb, u32 sec)
620 {
621 cache_ent_t *bp;
622
623 bp = __dcache_find(sb, sec);
624 if (likely(bp)) {
625 bp->flag |= LOCKBIT;
626 return 0;
627 }
628
629 EMSG("%s : failed to lock buffer(sec:%u, bp:0x%p)\n", __func__, sec, bp);
630 return -EIO;
631 }
632
633 s32 dcache_unlock(struct super_block *sb, u32 sec)
634 {
635 cache_ent_t *bp;
636
637 bp = __dcache_find(sb, sec);
638 if (likely(bp)) {
639 bp->flag &= ~(LOCKBIT);
640 return 0;
641 }
642
643 EMSG("%s : failed to unlock buffer (sec:%u, bp:0x%p)\n", __func__, sec, bp);
644 return -EIO;
645 }
646
/*
 * Drop the buffer cache entry for sector 'sec', writing it back first
 * when dirty. Returns -ENOENT when not cached, -EIO on write failure,
 * 0 on success.
 */
s32 dcache_release(struct super_block *sb, u32 sec)
{
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	bp = __dcache_find(sb, sec);
	if (unlikely(!bp))
		return -ENOENT;

#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	if (bp->flag & DIRTYBIT) {
		if (write_sect(sb, bp->sec, bp->bh, 0))
			return -EIO;
	}
#endif
	bp->sec = ~0;
	bp->flag = 0;

	if(bp->bh) {
		__brelse(bp->bh);
		bp->bh = NULL;
	}

	move_to_lru(bp, &fsi->dcache.lru_list);
	return 0;
}
673
674 s32 dcache_release_all(struct super_block *sb)
675 {
676 s32 ret = 0;
677 cache_ent_t *bp;
678 FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
679 s32 dirtycnt = 0;
680
681 /* Connect list elements */
682 /* LRU list : (A - B - ... - bp_front) + (bp_first + ... + bp_last) */
683 while (fsi->dcache.keep_list.prev != &fsi->dcache.keep_list){
684 cache_ent_t *bp_keep = fsi->dcache.keep_list.prev;
685 // bp_keep->flag &= ~(KEEPBIT); // Will be 0-ed later
686 move_to_mru(bp_keep, &fsi->dcache.lru_list);
687 }
688
689 bp = fsi->dcache.lru_list.next;
690 while (bp != &fsi->dcache.lru_list) {
691 #ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
692 if (bp->flag & DIRTYBIT) {
693 dirtycnt++;
694 if (write_sect(sb, bp->sec, bp->bh, 0))
695 ret = -EIO;
696 }
697 #endif
698
699 bp->sec = ~0;
700 bp->flag = 0;
701
702 if(bp->bh) {
703 __brelse(bp->bh);
704 bp->bh = NULL;
705 }
706 bp = bp->next;
707 }
708
709 DMSG("BD:Release / dirty buf cache: %d (err:%d)", dirtycnt, ret);
710 return ret;
711 }
712
713
/*
 * Write back every dirty buffer cache entry. Entries parked on the
 * keep list are first moved back to the LRU list (KEEPBIT cleared).
 * Stops at the first I/O error; returns 0 or -EIO.
 */
s32 dcache_flush(struct super_block *sb, u32 sync)
{
	s32 ret = 0;
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	s32 dirtycnt = 0;
	s32 keepcnt = 0;

	/* Connect list elements */
	/* LRU list : (A - B - ... - bp_front) + (bp_first + ... + bp_last) */
	// XXX: optimization
	while (fsi->dcache.keep_list.prev != &fsi->dcache.keep_list){
		cache_ent_t *bp_keep = fsi->dcache.keep_list.prev;
		bp_keep->flag &= ~(KEEPBIT); // Will be 0-ed later
		move_to_mru(bp_keep, &fsi->dcache.lru_list);

		keepcnt++;
	}

	bp = fsi->dcache.lru_list.next;
	while (bp != &fsi->dcache.lru_list) {
		if (bp->flag & DIRTYBIT) {
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
			// Make buffer dirty (XXX: Naive impl.)
			if (write_sect(sb, bp->sec, bp->bh, 0)) {
				ret = -EIO;
				break;
			}

#endif
			bp->flag &= ~(DIRTYBIT);
			dirtycnt++;

			/* optionally force the bh out synchronously */
			if (sync != 0)
				sync_dirty_buffer(bp->bh);
		}
		bp = bp->next;
	}

	MMSG("BD: flush / dirty dentry cache: %d (%d from keeplist, err:%d)\n",
		dirtycnt, keepcnt, ret);
	return ret;
}
757
758 static cache_ent_t *__dcache_find(struct super_block *sb, u32 sec)
759 {
760 s32 off;
761 cache_ent_t *bp, *hp;
762 FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
763
764 off = (sec + (sec >> fsi->sect_per_clus_bits)) & (BUF_CACHE_HASH_SIZE - 1);
765
766 hp = &(fsi->dcache.hash_list[off]);
767 for (bp = hp->hash.next; bp != hp; bp = bp->hash.next) {
768 if (bp->sec == sec) {
769 touch_buffer(bp->bh);
770 return(bp);
771 }
772 }
773 return NULL;
774 } /* end of __dcache_find */
775
/*
 * Pick the least-recently-used buffer cache entry for recycling.
 * Locked entries are never taken. With delayed dirtying, dirty
 * entries are parked on the keep list until a flush clears them; if
 * the whole LRU list is exhausted, everything is flushed and the scan
 * restarts. ('sec' is unused here; the caller rebinds the entry.)
 */
static cache_ent_t *__dcache_get(struct super_block *sb, u32 sec)
{
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	bp = fsi->dcache.lru_list.prev;
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	while (bp->flag & (DIRTYBIT | LOCKBIT)) {
		cache_ent_t *bp_prev = bp->prev; // hold prev

		if (bp->flag & DIRTYBIT) {
			MMSG("BD: Buf cache => Keep list\n");
			bp->flag |= KEEPBIT;
			move_to_mru(bp, &fsi->dcache.keep_list);
		}

		bp = bp_prev;

		/* If all dcaches are dirty */
		if (bp == &fsi->dcache.lru_list) {
			DMSG("BD: buf cache flooding\n");
			dcache_flush(sb, 0);
			bp = fsi->dcache.lru_list.prev;
		}
	}
#else
	while (bp->flag & LOCKBIT)
		bp = bp->prev;
#endif
	// if (bp->flag & DIRTYBIT)
	//	sync_dirty_buffer(bp->bh);

	move_to_mru(bp, &fsi->dcache.lru_list);
	return(bp);
} /* end of __dcache_get */
811
812 static void __dcache_insert_hash(struct super_block *sb, cache_ent_t *bp)
813 {
814 s32 off;
815 cache_ent_t *hp;
816 FS_INFO_T *fsi;
817
818 fsi = &(SDFAT_SB(sb)->fsi);
819 off = (bp->sec + (bp->sec >> fsi->sect_per_clus_bits)) & (BUF_CACHE_HASH_SIZE-1);
820
821 hp = &(fsi->dcache.hash_list[off]);
822 bp->hash.next = hp->hash.next;
823 bp->hash.prev = hp;
824 hp->hash.next->hash.prev = bp;
825 hp->hash.next = bp;
826 } /* end of __dcache_insert_hash */
827
/*
 * Unlink bp from its buffer cache hash chain.
 * With DEBUG_HASH_LIST, a double remove is detected via the poison
 * pointers and refused with an error message.
 */
static void __dcache_remove_hash(cache_ent_t *bp)
{
#ifdef DEBUG_HASH_LIST
	if ( (bp->hash.next == (cache_ent_t*)DEBUG_HASH_NEXT) ||
		(bp->hash.prev == (cache_ent_t*)DEBUG_HASH_PREV) ) {
		EMSG("%s: FATAL: tried to remove already-removed-cache-entry"
			"(bp:%p)\n", __func__, bp);
		return;
	}
#endif
	/* removing a dirty entry would lose its pending write-back */
	WARN_ON(bp->flag & DIRTYBIT);
	__remove_from_hash(bp);
} /* end of __dcache_remove_hash */
841
842
843 /* end of cache.c */