/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
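/*
 * A note on the data structures used below (a summary inferred from this
 * file, not authoritative documentation): the node manager keeps NAT
 * entries in a radix tree (nm_i->nat_root) keyed by nid, with clean
 * entries strung on nm_i->nat_entries and checkpoint-pending ones on
 * nm_i->dirty_nat_entries, while pre-scanned free nids wait on
 * nm_i->free_nid_list until alloc_nid() hands them out.
 */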
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}
		if (f2fs_readpage(sbi, page, index, READ))
			continue;

		f2fs_put_page(page, 0);
	}
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}
int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}
static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated,
		 * a previous nat entry can remain in the nat cache.
		 * So, reinitialize it with new information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version number as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}
static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}
/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		return;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);

	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			offset[n - 2] * (dptrs_per_blk + 1) +
			offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
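/*
 * A worked example for get_node_path() (a sketch; the constants assumed
 * here, ADDRS_PER_INODE = 923 and ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018,
 * come from the on-disk layout headers and may differ between versions):
 * for block 1000, the first branch fails (1000 >= 923), so block becomes
 * 77, which is < direct_blks; the result is offset[0] = NODE_DIR1_BLOCK,
 * offset[1] = 77, noffset[1] = 1, and level = 1, i.e. one direct node
 * sits between the inode and the data block.
 */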
/*
 * Caller should call f2fs_put_dnode(dn).
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			mutex_lock_op(sbi, NODE_NEW);

			/* allocate new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				mutex_unlock_op(sbi, NODE_NEW);
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				mutex_unlock_op(sbi, NODE_NEW);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			mutex_unlock_op(sbi, NODE_NEW);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
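/*
 * A minimal usage sketch for get_dnode_of_data() (hedged: LOOKUP_NODE is
 * assumed to be the read-only lookup mode declared alongside ALLOC_NODE
 * and LOOKUP_NODE_RA in f2fs.h):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	// dn.data_blkaddr now holds the block address for 'index'
 *	f2fs_put_dnode(&dn);
 */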
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		BUG_ON(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	BUG_ON(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
}
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page))
		return PTR_ERR(page);

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	return freed;

out_err:
	f2fs_put_page(page, 1);
	return ret;
}
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	level = get_node_path(from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	return err > 0 ? 0 : err;
}
int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	mutex_lock_op(sbi, NODE_TRUNC);
	page = get_node_page(sbi, ino);
	if (IS_ERR(page)) {
		mutex_unlock_op(sbi, NODE_TRUNC);
		return PTR_ERR(page);
	}

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage)) {
			mutex_unlock_op(sbi, NODE_TRUNC);
			return PTR_ERR(npage);
		}

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}

	/* 0 is possible after f2fs_new_inode() fails */
	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);

	mutex_unlock_op(sbi, NODE_TRUNC);
	return 0;
}
int new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	mutex_lock_op(sbi, NODE_NEW);
	page = new_node_page(&dn, 0);
	init_dent_inode(name, page);
	mutex_unlock_op(sbi, NODE_NEW);
	if (IS_ERR(page))
		return PTR_ERR(page);
	f2fs_put_page(page, 1);
	return 0;
}
struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page)) {
		unlock_page(page);
		return 0;
	}

	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}
/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	if (read_node_page(apage, READA) == 0)
		f2fs_put_page(apage, 0);
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err;

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}
/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	int i, end;
	int err = 0;
	nid_t nid;
	struct page *page;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);
	else if (PageUptodate(page))
		goto page_hit;

	err = read_node_page(page, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	lock_page(page);

page_hit:
	if (PageError(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}
void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		f2fs_write_inode(dn->inode, NULL);
	}
}
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * If we are in fsync mode,
			 * we should not skip writing node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	wait_on_page_writeback(page);

	mutex_lock_op(sbi, NODE_WRITE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR)
		goto out;

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		mutex_unlock_op(sbi, NODE_WRITE);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	set_page_writeback(page);

	/* insert node offset */
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock_op(sbi, NODE_WRITE);
	unlock_page(page);
	return 0;
}
/*
 * It is very important to gather dirty pages and write at once, so that we can
 * submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB), a segment size, is quite reasonable.
 */
#define COLLECT_DIRTY_NODES	512
static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	struct block_device *bdev = sbi->sb->s_bdev;
	long nr_to_write = wbc->nr_to_write;

	/* First check balancing cached NAT entries */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		write_checkpoint(sbi, false);
		return 0;
	}

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting failed, skip writing node pages */
	wbc->nr_to_write = bio_get_nr_vecs(bdev);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write -
		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
	return 0;
}
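/*
 * A worked example of the nr_to_write accounting above (a sketch with
 * assumed numbers): if the caller asked for nr_to_write = 1024 and
 * bio_get_nr_vecs() reports 256, sync_node_pages() runs with a budget of
 * 256; if it writes 200 pages, wbc->nr_to_write comes back as 56, so the
 * pages actually written are 256 - 56 = 200 and the caller's remaining
 * budget is restored to 1024 - 200 = 824.
 */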
static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}
static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}
/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};
static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;

	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}
static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i) {
		cond_resched();
		goto retry;
	}
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}
static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
static int scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int fcnt = 0;
	unsigned int i;

	/* 0 nid should not be used */
	if (start_nid == 0)
		++start_nid;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			fcnt += add_free_nid(nm_i, start_nid);
	}
	return fcnt;
}
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t nid = 0;
	bool is_cycled = false;
	int fcnt = 0;
	int i;

	nid = nm_i->next_scan_nid;
	nm_i->init_scan_nid = nid;

	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		fcnt += scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));

		if (nid >= nm_i->max_nid) {
			nid = 0;
			is_cycled = true;
		}
		if (fcnt > MAX_FREE_NIDS)
			break;
		if (is_cycled && nm_i->init_scan_nid <= nid)
			break;
	}

	/* go to the next nat page in order to reuse free nids first */
	nm_i->next_scan_nid = nm_i->init_scan_nid + NAT_ENTRY_PER_BLOCK;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from current allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	mutex_lock(&nm_i->build_lock);
	if (!nm_i->fcnt) {
		/* scan NAT in order to build free nid list */
		build_free_nids(sbi);
		if (!nm_i->fcnt) {
			mutex_unlock(&nm_i->build_lock);
			return false;
		}
	}
	mutex_unlock(&nm_i->build_lock);

	/*
	 * We check fcnt again since the previous check is racy as
	 * we didn't hold free_nid_list_lock. So another thread
	 * could consume all of the free nids.
	 */
	spin_lock(&nm_i->free_nid_list_lock);
	if (!nm_i->fcnt) {
		spin_unlock(&nm_i->free_nid_list_lock);
		goto retry;
	}

	BUG_ON(list_empty(&nm_i->free_nid_list));
	list_for_each(this, &nm_i->free_nid_list) {
		i = list_entry(this, struct free_nid, list);
		if (i->state == NID_NEW)
			break;
	}

	BUG_ON(i->state != NID_NEW);
	*nid = i->nid;
	i->state = NID_ALLOC;
	nm_i->fcnt--;
	spin_unlock(&nm_i->free_nid_list_lock);
	return true;
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i) {
		BUG_ON(i->state != NID_ALLOC);
		__del_from_free_nid_list(i);
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	alloc_nid_done(sbi, nid);
	add_free_nid(NM_I(sbi), nid);
}
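/*
 * The nid allocation protocol, as used by get_dnode_of_data() above
 * (a sketch; error handling simplified):
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;			// no free nid available
 *	...initialize the new node with 'nid'...
 *	if (initialization failed)
 *		alloc_nid_failed(sbi, nid);	// recycle nid as free again
 *	else
 *		alloc_nid_done(sbi, nid);	// drop nid from the free list
 */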
void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* alloc temporary page for reading node blocks */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)	/* alloc_page() returns NULL, not an ERR_PTR, on failure */
		return -ENOMEM;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		/*
		 * In order to read the next node page,
		 * we must clear the PageUptodate flag.
		 */
		ClearPageUptodate(page);

		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;
	}
out:
	__free_pages(page, 0);
	return 0;
}
static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}
/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in curseg->sumpage */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page)
				f2fs_put_page(page, 1);

			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get nat block with dirty flag, increased reference
			 * count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);

			/* We can reuse this freed nid at this point */
			add_free_nid(NM_I(sbi), nid);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segment, so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);

	nm_i->nat_bitmap = kzalloc(nm_i->bitmap_size, GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	/* copy version bitmap */
	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
	return 0;
}
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned int idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}