If any f2fs_bug_on is triggered, fsck.f2fs is needed.

To make that actionable, every f2fs_bug_on() call now takes the f2fs_sb_info
of the affected filesystem. With CONFIG_F2FS_CHECK_FS enabled the behavior is
unchanged (BUG_ON); without it, the macro still issues a WARN_ON() and
additionally sets sbi->need_fsck, so the detected inconsistency is not
silently lost.
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
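---

Note for anyone reading this out of tree: the heart of the change is the
f2fs_bug_on() macro in f2fs.h below. Every caller now passes its
f2fs_sb_info, and builds without CONFIG_F2FS_CHECK_FS record the
inconsistency in sbi->need_fsck instead of only warning. What follows is a
minimal, self-contained user-space sketch of that pattern, not kernel code:
struct f2fs_sb_info is reduced to the one field this patch touches,
fprintf() stands in for WARN_ON(), and the main() driver is purely
illustrative.

#include <stdbool.h>
#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

/* toy stand-in for struct f2fs_sb_info, reduced to the flag this patch adds */
struct f2fs_sb_info {
	bool need_fsck;
};

/* the !CONFIG_F2FS_CHECK_FS flavor: report, remember, keep running */
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (unlikely(condition)) {				\
			fprintf(stderr, "f2fs_bug_on at %s:%d\n",	\
				__FILE__, __LINE__);			\
			(sbi)->need_fsck = true;			\
		}							\
	} while (0)

int main(void)
{
	struct f2fs_sb_info sbi = { .need_fsck = false };

	f2fs_bug_on(&sbi, 1 + 1 != 2);	/* false: no report, flag untouched */
	f2fs_bug_on(&sbi, 1 + 1 == 2);	/* simulated metadata inconsistency */

	if (sbi.need_fsck)
		puts("inconsistency detected, run fsck.f2fs");
	return 0;
}

The do { ... } while (0) wrapper is what lets the multi-statement version be
used anywhere a single statement is expected (for example in an unbraced if),
matching how the one-line WARN_ON() form behaved before.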
void release_orphan_inode(struct f2fs_sb_info *sbi)
{
spin_lock(&sbi->ino_lock[ORPHAN_INO]);
- f2fs_bug_on(sbi->n_orphans == 0);
+ f2fs_bug_on(sbi, sbi->n_orphans == 0);
sbi->n_orphans--;
spin_unlock(&sbi->ino_lock[ORPHAN_INO]);
}
static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
struct inode *inode = f2fs_iget(sbi->sb, ino);
- f2fs_bug_on(IS_ERR(inode));
+ f2fs_bug_on(sbi, IS_ERR(inode));
clear_nlink(inode);
/* truncate all the data during iput */
list_for_each_entry(orphan, head, list) {
if (!page) {
page = find_get_page(META_MAPPING(sbi), start_blk++);
- f2fs_bug_on(!page);
+ f2fs_bug_on(sbi, !page);
orphan_blk =
(struct f2fs_orphan_block *)page_address(page);
memset(orphan_blk, 0, sizeof(*orphan_blk));
int err;
/* if inode_page exists, index should be zero */
- f2fs_bug_on(!need_put && index);
+ f2fs_bug_on(F2FS_I_SB(dn->inode), !need_put && index);
err = get_dnode_of_data(dn, index, ALLOC_NODE);
if (err)
block_t start_blkaddr, end_blkaddr;
int need_update = true;
- f2fs_bug_on(blk_addr == NEW_ADDR);
+ f2fs_bug_on(F2FS_I_SB(dn->inode), blk_addr == NEW_ADDR);
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
dn->ofs_in_node;
* For the most part, it should be a bug when name_len is zero.
* We stop here for figuring out where the bugs has occurred.
*/
- f2fs_bug_on(!de->name_len);
+ f2fs_bug_on(F2FS_P_SB(dentry_page), !de->name_len);
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
}
bool room = false;
int max_slots = 0;
- f2fs_bug_on(level > MAX_DIR_HASH_DEPTH);
+ f2fs_bug_on(F2FS_I_SB(dir), level > MAX_DIR_HASH_DEPTH);
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
#include <linux/sched.h>
#ifdef CONFIG_F2FS_CHECK_FS
-#define f2fs_bug_on(condition) BUG_ON(condition)
+#define f2fs_bug_on(sbi, condition) BUG_ON(condition)
#define f2fs_down_write(x, y) down_write_nest_lock(x, y)
#else
-#define f2fs_bug_on(condition) WARN_ON(condition)
+#define f2fs_bug_on(sbi, condition)					\
+	do {								\
+		if (unlikely(condition)) {				\
+			WARN_ON(1);					\
+			sbi->need_fsck = true;				\
+		}							\
+	} while (0)
#define f2fs_down_write(x, y) down_write(x)
#endif
blkcnt_t count)
{
spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi->total_valid_block_count < (block_t) count);
- f2fs_bug_on(inode->i_blocks < count);
+ f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
+ f2fs_bug_on(sbi, inode->i_blocks < count);
inode->i_blocks -= count;
sbi->total_valid_block_count -= (block_t)count;
spin_unlock(&sbi->stat_lock);
{
spin_lock(&sbi->stat_lock);
- f2fs_bug_on(!sbi->total_valid_block_count);
- f2fs_bug_on(!sbi->total_valid_node_count);
- f2fs_bug_on(!inode->i_blocks);
+ f2fs_bug_on(sbi, !sbi->total_valid_block_count);
+ f2fs_bug_on(sbi, !sbi->total_valid_node_count);
+ f2fs_bug_on(sbi, !inode->i_blocks);
inode->i_blocks--;
sbi->total_valid_node_count--;
static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi->total_valid_inode_count == sbi->total_node_count);
+ f2fs_bug_on(sbi, sbi->total_valid_inode_count == sbi->total_node_count);
sbi->total_valid_inode_count++;
spin_unlock(&sbi->stat_lock);
}
static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
spin_lock(&sbi->stat_lock);
- f2fs_bug_on(!sbi->total_valid_inode_count);
+ f2fs_bug_on(sbi, !sbi->total_valid_inode_count);
sbi->total_valid_inode_count--;
spin_unlock(&sbi->stat_lock);
}
return;
if (unlock) {
- f2fs_bug_on(!PageLocked(page));
+ f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
unlock_page(page);
}
page_cache_release(page);
count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
count -= dn.ofs_in_node;
- f2fs_bug_on(count < 0);
+ f2fs_bug_on(sbi, count < 0);
if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
truncate_data_blocks_range(&dn, count);
ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
ipage = get_node_page(sbi, inode->i_ino);
- f2fs_bug_on(IS_ERR(ipage));
+ f2fs_bug_on(sbi, IS_ERR(ipage));
f2fs_wait_on_page_writeback(ipage, NODE);
if (f2fs_has_inline_data(inode)) {
ipage = get_node_page(sbi, inode->i_ino);
- f2fs_bug_on(IS_ERR(ipage));
+ f2fs_bug_on(sbi, IS_ERR(ipage));
f2fs_wait_on_page_writeback(ipage, NODE);
zero_user_segment(ipage, INLINE_DATA_OFFSET,
INLINE_DATA_OFFSET + MAX_INLINE_DATA);
inode->i_ino == F2FS_META_INO(sbi))
goto out_clear;
- f2fs_bug_on(get_dirty_dents(inode));
+ f2fs_bug_on(sbi, get_dirty_dents(inode));
remove_dirty_dir_inode(inode);
if (inode->i_nlink || is_bad_inode(inode))
/* get current nat block page with lock */
src_page = get_meta_page(sbi, src_off);
dst_page = grab_meta_page(sbi, dst_off);
- f2fs_bug_on(PageDirty(src_page));
+ f2fs_bug_on(sbi, PageDirty(src_page));
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
goto retry;
}
e->ni = *ni;
- f2fs_bug_on(ni->blk_addr == NEW_ADDR);
+ f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
} else if (new_blkaddr == NEW_ADDR) {
/*
* when nid is reallocated,
* So, reinitialize it with new information.
*/
e->ni = *ni;
- f2fs_bug_on(ni->blk_addr != NULL_ADDR);
+ f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
}
/* sanity check */
- f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
- f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
+ f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
+ f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
new_blkaddr == NULL_ADDR);
- f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
+ f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
new_blkaddr == NEW_ADDR);
- f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
+ f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
nat_get_blkaddr(e) != NULL_ADDR &&
new_blkaddr == NEW_ADDR);
get_node_info(sbi, dn->nid, &ni);
if (dn->inode->i_blocks == 0) {
- f2fs_bug_on(ni.blk_addr != NULL_ADDR);
+ f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
goto invalidate;
}
- f2fs_bug_on(ni.blk_addr == NULL_ADDR);
+ f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
/* Deallocate node address */
invalidate_blocks(sbi, ni.blk_addr);
truncate_data_blocks_range(&dn, 1);
/* 0 is possible, after f2fs_new_inode() has failed */
- f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
+ f2fs_bug_on(F2FS_I_SB(inode),
+ inode->i_blocks != 0 && inode->i_blocks != 1);
/* will put inode & node pages */
truncate_node(&dn);
get_node_info(sbi, dn->nid, &old_ni);
/* Reinitialize old_ni with new node page */
- f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
+ f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
new_ni = old_ni;
new_ni.ino = dn->inode->i_ino;
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
/* get old block addr of this node page */
nid = nid_of_node(page);
- f2fs_bug_on(page->index != nid);
+ f2fs_bug_on(sbi, page->index != nid);
get_node_info(sbi, nid, &ni);
break;
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
- f2fs_bug_on(blk_addr == NEW_ADDR);
+ f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
if (blk_addr == NULL_ADDR) {
if (add_free_nid(sbi, start_nid, true) < 0)
break;
/* We should not use stale free nids created by build_free_nids */
if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
- f2fs_bug_on(list_empty(&nm_i->free_nid_list));
+ f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
list_for_each_entry(i, &nm_i->free_nid_list, list)
if (i->state == NID_NEW)
break;
- f2fs_bug_on(i->state != NID_NEW);
+ f2fs_bug_on(sbi, i->state != NID_NEW);
*nid = i->nid;
i->state = NID_ALLOC;
nm_i->fcnt--;
spin_lock(&nm_i->free_nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
- f2fs_bug_on(!i || i->state != NID_ALLOC);
+ f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
__del_from_free_nid_list(nm_i, i);
spin_unlock(&nm_i->free_nid_list_lock);
spin_lock(&nm_i->free_nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
- f2fs_bug_on(!i || i->state != NID_ALLOC);
+ f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
if (!available_free_memory(sbi, FREE_NIDS)) {
__del_from_free_nid_list(nm_i, i);
need_free = true;
struct f2fs_inode *ri;
ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
- f2fs_bug_on(IS_ERR(ipage));
+ f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
ri = F2FS_INODE(page);
if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
/* Deallocate node address */
get_node_info(sbi, prev_xnid, &ni);
- f2fs_bug_on(ni.blk_addr == NULL_ADDR);
+ f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
invalidate_blocks(sbi, ni.blk_addr);
dec_valid_node_count(sbi, inode);
set_node_addr(sbi, &ni, NULL_ADDR, false);
recover_xnid:
/* 2: allocate new xattr nid */
if (unlikely(!inc_valid_node_count(sbi, inode)))
- f2fs_bug_on(1);
+ f2fs_bug_on(sbi, 1);
remove_free_nid(NM_I(sbi), new_xnid);
get_node_info(sbi, new_xnid, &ni);
static void release_nat_entry_set(struct nat_entry_set *nes,
struct f2fs_nm_info *nm_i)
{
- f2fs_bug_on(!list_empty(&nes->entry_list));
-
nm_i->dirty_nat_cnt -= nes->entry_cnt;
list_del(&nes->set_list);
kmem_cache_free(nat_entry_set_slab, nes);
} else {
page = get_next_nat_page(sbi, start_nid);
nat_blk = page_address(page);
- f2fs_bug_on(!nat_blk);
+ f2fs_bug_on(sbi, !nat_blk);
}
/* flush dirty nats in nat entry set */
if (to_journal) {
offset = lookup_journal_in_cursum(sum,
NAT_JOURNAL, nid, 1);
- f2fs_bug_on(offset < 0);
+ f2fs_bug_on(sbi, offset < 0);
raw_ne = &nat_in_journal(sum, offset);
nid_in_journal(sum, offset) = cpu_to_le32(nid);
} else {
else
f2fs_put_page(page, 1);
+ f2fs_bug_on(sbi, !list_empty(&nes->entry_list));
release_nat_entry_set(nes, nm_i);
}
- f2fs_bug_on(!list_empty(head));
- f2fs_bug_on(nm_i->dirty_nat_cnt);
+ f2fs_bug_on(sbi, !list_empty(head));
+ f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
static int init_node_manager(struct f2fs_sb_info *sbi)
/* destroy free nid list */
spin_lock(&nm_i->free_nid_list_lock);
list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
- f2fs_bug_on(i->state == NID_ALLOC);
+ f2fs_bug_on(sbi, i->state == NID_ALLOC);
__del_from_free_nid_list(nm_i, i);
nm_i->fcnt--;
spin_unlock(&nm_i->free_nid_list_lock);
kmem_cache_free(free_nid_slab, i);
spin_lock(&nm_i->free_nid_list_lock);
}
- f2fs_bug_on(nm_i->fcnt);
+ f2fs_bug_on(sbi, nm_i->fcnt);
spin_unlock(&nm_i->free_nid_list_lock);
/* destroy nat cache */
for (idx = 0; idx < found; idx++)
__del_from_nat_cache(nm_i, natvec[idx]);
}
- f2fs_bug_on(nm_i->nat_cnt);
+ f2fs_bug_on(sbi, nm_i->nat_cnt);
write_unlock(&nm_i->nat_tree_lock);
kfree(nm_i->nat_bitmap);
f2fs_wait_on_page_writeback(dn.node_page, NODE);
get_node_info(sbi, dn.nid, &ni);
- f2fs_bug_on(ni.ino != ino_of_node(page));
- f2fs_bug_on(ofs_of_node(dn.node_page) != ofs_of_node(page));
+ f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
+ f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
for (; start < end; start++) {
block_t src, dest;
if (src == NULL_ADDR) {
err = reserve_new_block(&dn);
/* We should not get -ENOSPC */
- f2fs_bug_on(err);
+ f2fs_bug_on(sbi, err);
}
/* Check the previous node page having this index */
/* step #2: recover data */
err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
if (!err)
- f2fs_bug_on(!list_empty(&inode_list));
+ f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
destroy_fsync_dnodes(&inode_list);
kmem_cache_destroy(fsync_entry_slab);
new_vblocks = se->valid_blocks + del;
offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
- f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
+ f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
(new_vblocks > sbi->blocks_per_seg)));
se->valid_blocks = new_vblocks;
unsigned int segno = GET_SEGNO(sbi, addr);
struct sit_info *sit_i = SIT_I(sbi);
- f2fs_bug_on(addr == NULL_ADDR);
+ f2fs_bug_on(sbi, addr == NULL_ADDR);
if (addr == NEW_ADDR)
return;
if (dir == ALLOC_RIGHT) {
secno = find_next_zero_bit(free_i->free_secmap,
TOTAL_SECS(sbi), 0);
- f2fs_bug_on(secno >= TOTAL_SECS(sbi));
+ f2fs_bug_on(sbi, secno >= TOTAL_SECS(sbi));
} else {
go_left = 1;
left_start = hint - 1;
}
left_start = find_next_zero_bit(free_i->free_secmap,
TOTAL_SECS(sbi), 0);
- f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
+ f2fs_bug_on(sbi, left_start >= TOTAL_SECS(sbi));
break;
}
secno = left_start;
}
got_it:
/* set it as dirty segment in free segmap */
- f2fs_bug_on(test_bit(segno, free_i->free_segmap));
+ f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
__set_inuse(sbi, segno);
*newseg = segno;
write_unlock(&free_i->segmap_lock);
return __get_segment_type_4(page, p_type);
}
/* NR_CURSEG_TYPE(6) logs by default */
- f2fs_bug_on(F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
+ f2fs_bug_on(F2FS_P_SB(page),
+ F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
return __get_segment_type_6(page, p_type);
}
struct f2fs_summary sum;
struct node_info ni;
- f2fs_bug_on(dn->data_blkaddr == NULL_ADDR);
+ f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
/* get current sit block page without lock */
src_page = get_meta_page(sbi, src_off);
dst_page = grab_meta_page(sbi, dst_off);
- f2fs_bug_on(PageDirty(src_page));
+ f2fs_bug_on(sbi, PageDirty(src_page));
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
alloc_nid_failed(sbi, new_nid);
return PTR_ERR(xpage);
}
- f2fs_bug_on(new_nid);
+ f2fs_bug_on(sbi, new_nid);
f2fs_wait_on_page_writeback(xpage, NODE);
} else {
struct dnode_of_data dn;