/*
 *  linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */
14 #include <linux/errno.h>
15 #include <linux/slab.h>
17 #include "ext4_jbd2.h"
19 int ext4_resize_begin(struct super_block
*sb
)
23 if (!capable(CAP_SYS_RESOURCE
))
27 * We are not allowed to do online-resizing on a filesystem mounted
28 * with error, because it can destroy the filesystem easily.
30 if (EXT4_SB(sb
)->s_mount_state
& EXT4_ERROR_FS
) {
31 ext4_warning(sb
, "There are errors in the filesystem, "
32 "so online resizing is not allowed\n");
36 if (test_and_set_bit_lock(EXT4_RESIZING
, &EXT4_SB(sb
)->s_resize_flags
))
42 void ext4_resize_end(struct super_block
*sb
)
44 clear_bit_unlock(EXT4_RESIZING
, &EXT4_SB(sb
)->s_resize_flags
);
45 smp_mb__after_clear_bit();
/* Range helpers: [first, last) half-open interval membership tests. */
#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))
51 static int verify_group_input(struct super_block
*sb
,
52 struct ext4_new_group_data
*input
)
54 struct ext4_sb_info
*sbi
= EXT4_SB(sb
);
55 struct ext4_super_block
*es
= sbi
->s_es
;
56 ext4_fsblk_t start
= ext4_blocks_count(es
);
57 ext4_fsblk_t end
= start
+ input
->blocks_count
;
58 ext4_group_t group
= input
->group
;
59 ext4_fsblk_t itend
= input
->inode_table
+ sbi
->s_itb_per_group
;
60 unsigned overhead
= ext4_bg_has_super(sb
, group
) ?
61 (1 + ext4_bg_num_gdb(sb
, group
) +
62 le16_to_cpu(es
->s_reserved_gdt_blocks
)) : 0;
63 ext4_fsblk_t metaend
= start
+ overhead
;
64 struct buffer_head
*bh
= NULL
;
65 ext4_grpblk_t free_blocks_count
, offset
;
68 input
->free_blocks_count
= free_blocks_count
=
69 input
->blocks_count
- 2 - overhead
- sbi
->s_itb_per_group
;
71 if (test_opt(sb
, DEBUG
))
72 printk(KERN_DEBUG
"EXT4-fs: adding %s group %u: %u blocks "
73 "(%d free, %u reserved)\n",
74 ext4_bg_has_super(sb
, input
->group
) ? "normal" :
75 "no-super", input
->group
, input
->blocks_count
,
76 free_blocks_count
, input
->reserved_blocks
);
78 ext4_get_group_no_and_offset(sb
, start
, NULL
, &offset
);
79 if (group
!= sbi
->s_groups_count
)
80 ext4_warning(sb
, "Cannot add at group %u (only %u groups)",
81 input
->group
, sbi
->s_groups_count
);
83 ext4_warning(sb
, "Last group not full");
84 else if (input
->reserved_blocks
> input
->blocks_count
/ 5)
85 ext4_warning(sb
, "Reserved blocks too high (%u)",
86 input
->reserved_blocks
);
87 else if (free_blocks_count
< 0)
88 ext4_warning(sb
, "Bad blocks count %u",
90 else if (!(bh
= sb_bread(sb
, end
- 1)))
91 ext4_warning(sb
, "Cannot read last block (%llu)",
93 else if (outside(input
->block_bitmap
, start
, end
))
94 ext4_warning(sb
, "Block bitmap not in group (block %llu)",
95 (unsigned long long)input
->block_bitmap
);
96 else if (outside(input
->inode_bitmap
, start
, end
))
97 ext4_warning(sb
, "Inode bitmap not in group (block %llu)",
98 (unsigned long long)input
->inode_bitmap
);
99 else if (outside(input
->inode_table
, start
, end
) ||
100 outside(itend
- 1, start
, end
))
101 ext4_warning(sb
, "Inode table not in group (blocks %llu-%llu)",
102 (unsigned long long)input
->inode_table
, itend
- 1);
103 else if (input
->inode_bitmap
== input
->block_bitmap
)
104 ext4_warning(sb
, "Block bitmap same as inode bitmap (%llu)",
105 (unsigned long long)input
->block_bitmap
);
106 else if (inside(input
->block_bitmap
, input
->inode_table
, itend
))
107 ext4_warning(sb
, "Block bitmap (%llu) in inode table "
109 (unsigned long long)input
->block_bitmap
,
110 (unsigned long long)input
->inode_table
, itend
- 1);
111 else if (inside(input
->inode_bitmap
, input
->inode_table
, itend
))
112 ext4_warning(sb
, "Inode bitmap (%llu) in inode table "
114 (unsigned long long)input
->inode_bitmap
,
115 (unsigned long long)input
->inode_table
, itend
- 1);
116 else if (inside(input
->block_bitmap
, start
, metaend
))
117 ext4_warning(sb
, "Block bitmap (%llu) in GDT table (%llu-%llu)",
118 (unsigned long long)input
->block_bitmap
,
120 else if (inside(input
->inode_bitmap
, start
, metaend
))
121 ext4_warning(sb
, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
122 (unsigned long long)input
->inode_bitmap
,
124 else if (inside(input
->inode_table
, start
, metaend
) ||
125 inside(itend
- 1, start
, metaend
))
126 ext4_warning(sb
, "Inode table (%llu-%llu) overlaps GDT table "
128 (unsigned long long)input
->inode_table
,
129 itend
- 1, start
, metaend
- 1);
138 * ext4_new_flex_group_data is used by 64bit-resize interface to add a flex
141 struct ext4_new_flex_group_data
{
142 struct ext4_new_group_data
*groups
; /* new_group_data for groups
144 __u16
*bg_flags
; /* block group flags of groups
146 ext4_group_t count
; /* number of groups in @groups
151 * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of
154 * Returns NULL on failure otherwise address of the allocated structure.
156 static struct ext4_new_flex_group_data
*alloc_flex_gd(unsigned long flexbg_size
)
158 struct ext4_new_flex_group_data
*flex_gd
;
160 flex_gd
= kmalloc(sizeof(*flex_gd
), GFP_NOFS
);
164 flex_gd
->count
= flexbg_size
;
166 flex_gd
->groups
= kmalloc(sizeof(struct ext4_new_group_data
) *
167 flexbg_size
, GFP_NOFS
);
168 if (flex_gd
->groups
== NULL
)
171 flex_gd
->bg_flags
= kmalloc(flexbg_size
* sizeof(__u16
), GFP_NOFS
);
172 if (flex_gd
->bg_flags
== NULL
)
178 kfree(flex_gd
->groups
);
185 static void free_flex_gd(struct ext4_new_flex_group_data
*flex_gd
)
187 kfree(flex_gd
->bg_flags
);
188 kfree(flex_gd
->groups
);
192 static struct buffer_head
*bclean(handle_t
*handle
, struct super_block
*sb
,
195 struct buffer_head
*bh
;
198 bh
= sb_getblk(sb
, blk
);
200 return ERR_PTR(-EIO
);
201 if ((err
= ext4_journal_get_write_access(handle
, bh
))) {
205 memset(bh
->b_data
, 0, sb
->s_blocksize
);
206 set_buffer_uptodate(bh
);
213 * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA.
214 * If that fails, restart the transaction & regain write access for the
215 * buffer head which is used for block_bitmap modifications.
217 static int extend_or_restart_transaction(handle_t
*handle
, int thresh
)
221 if (ext4_handle_has_enough_credits(handle
, thresh
))
224 err
= ext4_journal_extend(handle
, EXT4_MAX_TRANS_DATA
);
228 err
= ext4_journal_restart(handle
, EXT4_MAX_TRANS_DATA
);
237 * set_flexbg_block_bitmap() mark @count blocks starting from @block used.
239 * Helper function for ext4_setup_new_group_blocks() which set .
242 * @handle: journal handle
243 * @flex_gd: flex group data
245 static int set_flexbg_block_bitmap(struct super_block
*sb
, handle_t
*handle
,
246 struct ext4_new_flex_group_data
*flex_gd
,
247 ext4_fsblk_t block
, ext4_group_t count
)
251 ext4_debug("mark blocks [%llu/%u] used\n", block
, count
);
252 for (count2
= count
; count
> 0; count
-= count2
, block
+= count2
) {
254 struct buffer_head
*bh
;
258 ext4_get_group_no_and_offset(sb
, block
, &group
, NULL
);
259 start
= ext4_group_first_block_no(sb
, group
);
260 group
-= flex_gd
->groups
[0].group
;
262 count2
= sb
->s_blocksize
* 8 - (block
- start
);
266 if (flex_gd
->bg_flags
[group
] & EXT4_BG_BLOCK_UNINIT
) {
267 BUG_ON(flex_gd
->count
> 1);
271 err
= extend_or_restart_transaction(handle
, 1);
275 bh
= sb_getblk(sb
, flex_gd
->groups
[group
].block_bitmap
);
279 err
= ext4_journal_get_write_access(handle
, bh
);
282 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block
,
283 block
- start
, count2
);
284 ext4_set_bits(bh
->b_data
, block
- start
, count2
);
286 err
= ext4_handle_dirty_metadata(handle
, NULL
, bh
);
296 * Set up the block and inode bitmaps, and the inode table for the new groups.
297 * This doesn't need to be part of the main transaction, since we are only
298 * changing blocks outside the actual filesystem. We still do journaling to
299 * ensure the recovery is correct in case of a failure just after resize.
300 * If any part of this fails, we simply abort the resize.
302 * setup_new_flex_group_blocks handles a flex group as follow:
303 * 1. copy super block and GDT, and initialize group tables if necessary.
304 * In this step, we only set bits in blocks bitmaps for blocks taken by
305 * super block and GDT.
306 * 2. allocate group tables in block bitmaps, that is, set bits in block
307 * bitmap for blocks taken by group tables.
309 static int setup_new_flex_group_blocks(struct super_block
*sb
,
310 struct ext4_new_flex_group_data
*flex_gd
)
312 int group_table_count
[] = {1, 1, EXT4_SB(sb
)->s_itb_per_group
};
315 struct ext4_sb_info
*sbi
= EXT4_SB(sb
);
316 struct ext4_super_block
*es
= sbi
->s_es
;
317 struct ext4_new_group_data
*group_data
= flex_gd
->groups
;
318 __u16
*bg_flags
= flex_gd
->bg_flags
;
320 ext4_group_t group
, count
;
321 struct buffer_head
*bh
= NULL
;
322 int reserved_gdb
, i
, j
, err
= 0, err2
;
324 BUG_ON(!flex_gd
->count
|| !group_data
||
325 group_data
[0].group
!= sbi
->s_groups_count
);
327 reserved_gdb
= le16_to_cpu(es
->s_reserved_gdt_blocks
);
329 /* This transaction may be extended/restarted along the way */
330 handle
= ext4_journal_start_sb(sb
, EXT4_MAX_TRANS_DATA
);
332 return PTR_ERR(handle
);
334 group
= group_data
[0].group
;
335 for (i
= 0; i
< flex_gd
->count
; i
++, group
++) {
336 unsigned long gdblocks
;
338 gdblocks
= ext4_bg_num_gdb(sb
, group
);
339 start
= ext4_group_first_block_no(sb
, group
);
341 /* Copy all of the GDT blocks into the backup in this group */
342 for (j
= 0, block
= start
+ 1; j
< gdblocks
; j
++, block
++) {
343 struct buffer_head
*gdb
;
345 ext4_debug("update backup group %#04llx\n", block
);
346 err
= extend_or_restart_transaction(handle
, 1);
350 gdb
= sb_getblk(sb
, block
);
356 err
= ext4_journal_get_write_access(handle
, gdb
);
361 memcpy(gdb
->b_data
, sbi
->s_group_desc
[j
]->b_data
,
363 set_buffer_uptodate(gdb
);
365 err
= ext4_handle_dirty_metadata(handle
, NULL
, gdb
);
373 /* Zero out all of the reserved backup group descriptor
376 if (ext4_bg_has_super(sb
, group
)) {
377 err
= sb_issue_zeroout(sb
, gdblocks
+ start
+ 1,
378 reserved_gdb
, GFP_NOFS
);
383 /* Initialize group tables of the grop @group */
384 if (!(bg_flags
[i
] & EXT4_BG_INODE_ZEROED
))
387 /* Zero out all of the inode table blocks */
388 block
= group_data
[i
].inode_table
;
389 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
390 block
, sbi
->s_itb_per_group
);
391 err
= sb_issue_zeroout(sb
, block
, sbi
->s_itb_per_group
,
397 if (bg_flags
[i
] & EXT4_BG_BLOCK_UNINIT
)
400 /* Initialize block bitmap of the @group */
401 block
= group_data
[i
].block_bitmap
;
402 err
= extend_or_restart_transaction(handle
, 1);
406 bh
= bclean(handle
, sb
, block
);
411 if (ext4_bg_has_super(sb
, group
)) {
412 ext4_debug("mark backup superblock %#04llx (+0)\n",
414 ext4_set_bits(bh
->b_data
, 0, gdblocks
+ reserved_gdb
+
417 ext4_mark_bitmap_end(group_data
[i
].blocks_count
,
418 sb
->s_blocksize
* 8, bh
->b_data
);
419 err
= ext4_handle_dirty_metadata(handle
, NULL
, bh
);
425 if (bg_flags
[i
] & EXT4_BG_INODE_UNINIT
)
428 /* Initialize inode bitmap of the @group */
429 block
= group_data
[i
].inode_bitmap
;
430 err
= extend_or_restart_transaction(handle
, 1);
433 /* Mark unused entries in inode bitmap used */
434 bh
= bclean(handle
, sb
, block
);
440 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb
),
441 sb
->s_blocksize
* 8, bh
->b_data
);
442 err
= ext4_handle_dirty_metadata(handle
, NULL
, bh
);
449 /* Mark group tables in block bitmap */
450 for (j
= 0; j
< GROUP_TABLE_COUNT
; j
++) {
451 count
= group_table_count
[j
];
452 start
= (&group_data
[0].block_bitmap
)[j
];
454 for (i
= 1; i
< flex_gd
->count
; i
++) {
455 block
+= group_table_count
[j
];
456 if (block
== (&group_data
[i
].block_bitmap
)[j
]) {
457 count
+= group_table_count
[j
];
460 err
= set_flexbg_block_bitmap(sb
, handle
,
461 flex_gd
, start
, count
);
464 count
= group_table_count
[j
];
465 start
= group_data
[i
].block_bitmap
;
470 err
= set_flexbg_block_bitmap(sb
, handle
,
471 flex_gd
, start
, count
);
479 err2
= ext4_journal_stop(handle
);
487 * Set up the block and inode bitmaps, and the inode table for the new group.
488 * This doesn't need to be part of the main transaction, since we are only
489 * changing blocks outside the actual filesystem. We still do journaling to
490 * ensure the recovery is correct in case of a failure just after resize.
491 * If any part of this fails, we simply abort the resize.
493 static int setup_new_group_blocks(struct super_block
*sb
,
494 struct ext4_new_group_data
*input
)
496 struct ext4_sb_info
*sbi
= EXT4_SB(sb
);
497 ext4_fsblk_t start
= ext4_group_first_block_no(sb
, input
->group
);
498 int reserved_gdb
= ext4_bg_has_super(sb
, input
->group
) ?
499 le16_to_cpu(sbi
->s_es
->s_reserved_gdt_blocks
) : 0;
500 unsigned long gdblocks
= ext4_bg_num_gdb(sb
, input
->group
);
501 struct buffer_head
*bh
;
508 /* This transaction may be extended/restarted along the way */
509 handle
= ext4_journal_start_sb(sb
, EXT4_MAX_TRANS_DATA
);
512 return PTR_ERR(handle
);
514 BUG_ON(input
->group
!= sbi
->s_groups_count
);
516 /* Copy all of the GDT blocks into the backup in this group */
517 for (i
= 0, bit
= 1, block
= start
+ 1;
518 i
< gdblocks
; i
++, block
++, bit
++) {
519 struct buffer_head
*gdb
;
521 ext4_debug("update backup group %#04llx (+%d)\n", block
, bit
);
522 err
= extend_or_restart_transaction(handle
, 1);
526 gdb
= sb_getblk(sb
, block
);
531 if ((err
= ext4_journal_get_write_access(handle
, gdb
))) {
535 memcpy(gdb
->b_data
, sbi
->s_group_desc
[i
]->b_data
, gdb
->b_size
);
536 set_buffer_uptodate(gdb
);
537 err
= ext4_handle_dirty_metadata(handle
, NULL
, gdb
);
545 /* Zero out all of the reserved backup group descriptor table blocks */
546 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
547 block
, sbi
->s_itb_per_group
);
548 err
= sb_issue_zeroout(sb
, gdblocks
+ start
+ 1, reserved_gdb
,
553 err
= extend_or_restart_transaction(handle
, 2);
557 bh
= bclean(handle
, sb
, input
->block_bitmap
);
563 if (ext4_bg_has_super(sb
, input
->group
)) {
564 ext4_debug("mark backup group tables %#04llx (+0)\n", start
);
565 ext4_set_bits(bh
->b_data
, 0, gdblocks
+ reserved_gdb
+ 1);
568 ext4_debug("mark block bitmap %#04llx (+%llu)\n", input
->block_bitmap
,
569 input
->block_bitmap
- start
);
570 ext4_set_bit(input
->block_bitmap
- start
, bh
->b_data
);
571 ext4_debug("mark inode bitmap %#04llx (+%llu)\n", input
->inode_bitmap
,
572 input
->inode_bitmap
- start
);
573 ext4_set_bit(input
->inode_bitmap
- start
, bh
->b_data
);
575 /* Zero out all of the inode table blocks */
576 block
= input
->inode_table
;
577 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
578 block
, sbi
->s_itb_per_group
);
579 err
= sb_issue_zeroout(sb
, block
, sbi
->s_itb_per_group
, GFP_NOFS
);
582 ext4_set_bits(bh
->b_data
, input
->inode_table
- start
,
583 sbi
->s_itb_per_group
);
586 ext4_mark_bitmap_end(input
->blocks_count
, sb
->s_blocksize
* 8,
588 err
= ext4_handle_dirty_metadata(handle
, NULL
, bh
);
590 ext4_std_error(sb
, err
);
594 /* Mark unused entries in inode bitmap used */
595 ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
596 input
->inode_bitmap
, input
->inode_bitmap
- start
);
597 if (IS_ERR(bh
= bclean(handle
, sb
, input
->inode_bitmap
))) {
602 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb
), sb
->s_blocksize
* 8,
604 err
= ext4_handle_dirty_metadata(handle
, NULL
, bh
);
606 ext4_std_error(sb
, err
);
611 if ((err2
= ext4_journal_stop(handle
)) && !err
)
618 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
619 * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before
620 * calling this for the first time. In a sparse filesystem it will be the
621 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
622 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
624 static unsigned ext4_list_backups(struct super_block
*sb
, unsigned *three
,
625 unsigned *five
, unsigned *seven
)
627 unsigned *min
= three
;
631 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb
,
632 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER
)) {
654 * Check that all of the backup GDT blocks are held in the primary GDT block.
655 * It is assumed that they are stored in group order. Returns the number of
656 * groups in current filesystem that have BACKUPS, or -ve error code.
658 static int verify_reserved_gdb(struct super_block
*sb
,
660 struct buffer_head
*primary
)
662 const ext4_fsblk_t blk
= primary
->b_blocknr
;
667 __le32
*p
= (__le32
*)primary
->b_data
;
670 while ((grp
= ext4_list_backups(sb
, &three
, &five
, &seven
)) < end
) {
671 if (le32_to_cpu(*p
++) !=
672 grp
* EXT4_BLOCKS_PER_GROUP(sb
) + blk
){
673 ext4_warning(sb
, "reserved GDT %llu"
674 " missing grp %d (%llu)",
677 (ext4_fsblk_t
)EXT4_BLOCKS_PER_GROUP(sb
) +
681 if (++gdbackups
> EXT4_ADDR_PER_BLOCK(sb
))
689 * Called when we need to bring a reserved group descriptor table block into
690 * use from the resize inode. The primary copy of the new GDT block currently
691 * is an indirect block (under the double indirect block in the resize inode).
692 * The new backup GDT blocks will be stored as leaf blocks in this indirect
693 * block, in group order. Even though we know all the block numbers we need,
694 * we check to ensure that the resize inode has actually reserved these blocks.
696 * Don't need to update the block bitmaps because the blocks are still in use.
698 * We get all of the error cases out of the way, so that we are sure to not
699 * fail once we start modifying the data on disk, because JBD has no rollback.
701 static int add_new_gdb(handle_t
*handle
, struct inode
*inode
,
704 struct super_block
*sb
= inode
->i_sb
;
705 struct ext4_super_block
*es
= EXT4_SB(sb
)->s_es
;
706 unsigned long gdb_num
= group
/ EXT4_DESC_PER_BLOCK(sb
);
707 ext4_fsblk_t gdblock
= EXT4_SB(sb
)->s_sbh
->b_blocknr
+ 1 + gdb_num
;
708 struct buffer_head
**o_group_desc
, **n_group_desc
;
709 struct buffer_head
*dind
;
710 struct buffer_head
*gdb_bh
;
712 struct ext4_iloc iloc
;
716 if (test_opt(sb
, DEBUG
))
718 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
722 * If we are not using the primary superblock/GDT copy don't resize,
723 * because the user tools have no way of handling this. Probably a
724 * bad time to do it anyways.
726 if (EXT4_SB(sb
)->s_sbh
->b_blocknr
!=
727 le32_to_cpu(EXT4_SB(sb
)->s_es
->s_first_data_block
)) {
728 ext4_warning(sb
, "won't resize using backup superblock at %llu",
729 (unsigned long long)EXT4_SB(sb
)->s_sbh
->b_blocknr
);
733 gdb_bh
= sb_bread(sb
, gdblock
);
737 gdbackups
= verify_reserved_gdb(sb
, group
, gdb_bh
);
743 data
= EXT4_I(inode
)->i_data
+ EXT4_DIND_BLOCK
;
744 dind
= sb_bread(sb
, le32_to_cpu(*data
));
750 data
= (__le32
*)dind
->b_data
;
751 if (le32_to_cpu(data
[gdb_num
% EXT4_ADDR_PER_BLOCK(sb
)]) != gdblock
) {
752 ext4_warning(sb
, "new group %u GDT block %llu not reserved",
758 err
= ext4_journal_get_write_access(handle
, EXT4_SB(sb
)->s_sbh
);
762 err
= ext4_journal_get_write_access(handle
, gdb_bh
);
766 err
= ext4_journal_get_write_access(handle
, dind
);
768 ext4_std_error(sb
, err
);
770 /* ext4_reserve_inode_write() gets a reference on the iloc */
771 err
= ext4_reserve_inode_write(handle
, inode
, &iloc
);
775 n_group_desc
= ext4_kvmalloc((gdb_num
+ 1) *
776 sizeof(struct buffer_head
*),
780 ext4_warning(sb
, "not enough memory for %lu groups",
786 * Finally, we have all of the possible failures behind us...
788 * Remove new GDT block from inode double-indirect block and clear out
789 * the new GDT block for use (which also "frees" the backup GDT blocks
790 * from the reserved inode). We don't need to change the bitmaps for
791 * these blocks, because they are marked as in-use from being in the
792 * reserved inode, and will become GDT blocks (primary and backup).
794 data
[gdb_num
% EXT4_ADDR_PER_BLOCK(sb
)] = 0;
795 err
= ext4_handle_dirty_metadata(handle
, NULL
, dind
);
797 ext4_std_error(sb
, err
);
800 inode
->i_blocks
-= (gdbackups
+ 1) * sb
->s_blocksize
>> 9;
801 ext4_mark_iloc_dirty(handle
, inode
, &iloc
);
802 memset(gdb_bh
->b_data
, 0, sb
->s_blocksize
);
803 err
= ext4_handle_dirty_metadata(handle
, NULL
, gdb_bh
);
805 ext4_std_error(sb
, err
);
810 o_group_desc
= EXT4_SB(sb
)->s_group_desc
;
811 memcpy(n_group_desc
, o_group_desc
,
812 EXT4_SB(sb
)->s_gdb_count
* sizeof(struct buffer_head
*));
813 n_group_desc
[gdb_num
] = gdb_bh
;
814 EXT4_SB(sb
)->s_group_desc
= n_group_desc
;
815 EXT4_SB(sb
)->s_gdb_count
++;
816 ext4_kvfree(o_group_desc
);
818 le16_add_cpu(&es
->s_reserved_gdt_blocks
, -1);
819 err
= ext4_handle_dirty_metadata(handle
, NULL
, EXT4_SB(sb
)->s_sbh
);
821 ext4_std_error(sb
, err
);
826 ext4_kvfree(n_group_desc
);
827 /* ext4_handle_release_buffer(handle, iloc.bh); */
830 /* ext4_handle_release_buffer(handle, dind); */
832 /* ext4_handle_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
838 ext4_debug("leaving with error %d\n", err
);
843 * Called when we are adding a new group which has a backup copy of each of
844 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
845 * We need to add these reserved backup GDT blocks to the resize inode, so
846 * that they are kept for future resizing and not allocated to files.
848 * Each reserved backup GDT block will go into a different indirect block.
849 * The indirect blocks are actually the primary reserved GDT blocks,
850 * so we know in advance what their block numbers are. We only get the
851 * double-indirect block to verify it is pointing to the primary reserved
852 * GDT blocks so we don't overwrite a data block by accident. The reserved
853 * backup GDT blocks are stored in their reserved primary GDT block.
855 static int reserve_backup_gdb(handle_t
*handle
, struct inode
*inode
,
858 struct super_block
*sb
= inode
->i_sb
;
859 int reserved_gdb
=le16_to_cpu(EXT4_SB(sb
)->s_es
->s_reserved_gdt_blocks
);
860 struct buffer_head
**primary
;
861 struct buffer_head
*dind
;
862 struct ext4_iloc iloc
;
869 primary
= kmalloc(reserved_gdb
* sizeof(*primary
), GFP_NOFS
);
873 data
= EXT4_I(inode
)->i_data
+ EXT4_DIND_BLOCK
;
874 dind
= sb_bread(sb
, le32_to_cpu(*data
));
880 blk
= EXT4_SB(sb
)->s_sbh
->b_blocknr
+ 1 + EXT4_SB(sb
)->s_gdb_count
;
881 data
= (__le32
*)dind
->b_data
+ (EXT4_SB(sb
)->s_gdb_count
%
882 EXT4_ADDR_PER_BLOCK(sb
));
883 end
= (__le32
*)dind
->b_data
+ EXT4_ADDR_PER_BLOCK(sb
);
885 /* Get each reserved primary GDT block and verify it holds backups */
886 for (res
= 0; res
< reserved_gdb
; res
++, blk
++) {
887 if (le32_to_cpu(*data
) != blk
) {
888 ext4_warning(sb
, "reserved block %llu"
889 " not at offset %ld",
891 (long)(data
- (__le32
*)dind
->b_data
));
895 primary
[res
] = sb_bread(sb
, blk
);
900 gdbackups
= verify_reserved_gdb(sb
, group
, primary
[res
]);
902 brelse(primary
[res
]);
907 data
= (__le32
*)dind
->b_data
;
910 for (i
= 0; i
< reserved_gdb
; i
++) {
911 if ((err
= ext4_journal_get_write_access(handle
, primary
[i
]))) {
914 for (j = 0; j < i; j++)
915 ext4_handle_release_buffer(handle, primary[j]);
921 if ((err
= ext4_reserve_inode_write(handle
, inode
, &iloc
)))
925 * Finally we can add each of the reserved backup GDT blocks from
926 * the new group to its reserved primary GDT block.
928 blk
= group
* EXT4_BLOCKS_PER_GROUP(sb
);
929 for (i
= 0; i
< reserved_gdb
; i
++) {
931 data
= (__le32
*)primary
[i
]->b_data
;
932 /* printk("reserving backup %lu[%u] = %lu\n",
933 primary[i]->b_blocknr, gdbackups,
934 blk + primary[i]->b_blocknr); */
935 data
[gdbackups
] = cpu_to_le32(blk
+ primary
[i
]->b_blocknr
);
936 err2
= ext4_handle_dirty_metadata(handle
, NULL
, primary
[i
]);
940 inode
->i_blocks
+= reserved_gdb
* sb
->s_blocksize
>> 9;
941 ext4_mark_iloc_dirty(handle
, inode
, &iloc
);
945 brelse(primary
[res
]);
955 * Update the backup copies of the ext4 metadata. These don't need to be part
956 * of the main resize transaction, because e2fsck will re-write them if there
957 * is a problem (basically only OOM will cause a problem). However, we
958 * _should_ update the backups if possible, in case the primary gets trashed
959 * for some reason and we need to run e2fsck from a backup superblock. The
960 * important part is that the new block and inode counts are in the backup
961 * superblocks, and the location of the new group metadata in the GDT backups.
963 * We do not need take the s_resize_lock for this, because these
964 * blocks are not otherwise touched by the filesystem code when it is
965 * mounted. We don't need to worry about last changing from
966 * sbi->s_groups_count, because the worst that can happen is that we
967 * do not copy the full number of backups at this time. The resize
968 * which changed s_groups_count will backup again.
970 static void update_backups(struct super_block
*sb
,
971 int blk_off
, char *data
, int size
)
973 struct ext4_sb_info
*sbi
= EXT4_SB(sb
);
974 const ext4_group_t last
= sbi
->s_groups_count
;
975 const int bpg
= EXT4_BLOCKS_PER_GROUP(sb
);
980 int rest
= sb
->s_blocksize
- size
;
984 handle
= ext4_journal_start_sb(sb
, EXT4_MAX_TRANS_DATA
);
985 if (IS_ERR(handle
)) {
987 err
= PTR_ERR(handle
);
991 while ((group
= ext4_list_backups(sb
, &three
, &five
, &seven
)) < last
) {
992 struct buffer_head
*bh
;
994 /* Out of journal space, and can't get more - abort - so sad */
995 if (ext4_handle_valid(handle
) &&
996 handle
->h_buffer_credits
== 0 &&
997 ext4_journal_extend(handle
, EXT4_MAX_TRANS_DATA
) &&
998 (err
= ext4_journal_restart(handle
, EXT4_MAX_TRANS_DATA
)))
1001 bh
= sb_getblk(sb
, group
* bpg
+ blk_off
);
1006 ext4_debug("update metadata backup %#04lx\n",
1007 (unsigned long)bh
->b_blocknr
);
1008 if ((err
= ext4_journal_get_write_access(handle
, bh
)))
1011 memcpy(bh
->b_data
, data
, size
);
1013 memset(bh
->b_data
+ size
, 0, rest
);
1014 set_buffer_uptodate(bh
);
1016 err
= ext4_handle_dirty_metadata(handle
, NULL
, bh
);
1018 ext4_std_error(sb
, err
);
1021 if ((err2
= ext4_journal_stop(handle
)) && !err
)
1025 * Ugh! Need to have e2fsck write the backup copies. It is too
1026 * late to revert the resize, we shouldn't fail just because of
1027 * the backup copies (they are only needed in case of corruption).
1029 * However, if we got here we have a journal problem too, so we
1030 * can't really start a transaction to mark the superblock.
1031 * Chicken out and just set the flag on the hope it will be written
1032 * to disk, and if not - we will simply wait until next fsck.
1036 ext4_warning(sb
, "can't update backup for group %u (err %d), "
1037 "forcing fsck on next reboot", group
, err
);
1038 sbi
->s_mount_state
&= ~EXT4_VALID_FS
;
1039 sbi
->s_es
->s_state
&= cpu_to_le16(~EXT4_VALID_FS
);
1040 mark_buffer_dirty(sbi
->s_sbh
);
1045 * ext4_add_new_descs() adds @count group descriptor of groups
1046 * starting at @group
1048 * @handle: journal handle
1050 * @group: the group no. of the first group desc to be added
1051 * @resize_inode: the resize inode
1052 * @count: number of group descriptors to be added
1054 static int ext4_add_new_descs(handle_t
*handle
, struct super_block
*sb
,
1055 ext4_group_t group
, struct inode
*resize_inode
,
1058 struct ext4_sb_info
*sbi
= EXT4_SB(sb
);
1059 struct ext4_super_block
*es
= sbi
->s_es
;
1060 struct buffer_head
*gdb_bh
;
1061 int i
, gdb_off
, gdb_num
, err
= 0;
1063 for (i
= 0; i
< count
; i
++, group
++) {
1064 int reserved_gdb
= ext4_bg_has_super(sb
, group
) ?
1065 le16_to_cpu(es
->s_reserved_gdt_blocks
) : 0;
1067 gdb_off
= group
% EXT4_DESC_PER_BLOCK(sb
);
1068 gdb_num
= group
/ EXT4_DESC_PER_BLOCK(sb
);
1071 * We will only either add reserved group blocks to a backup group
1072 * or remove reserved blocks for the first group in a new group block.
1073 * Doing both would be mean more complex code, and sane people don't
1074 * use non-sparse filesystems anymore. This is already checked above.
1077 gdb_bh
= sbi
->s_group_desc
[gdb_num
];
1078 err
= ext4_journal_get_write_access(handle
, gdb_bh
);
1080 if (!err
&& reserved_gdb
&& ext4_bg_num_gdb(sb
, group
))
1081 err
= reserve_backup_gdb(handle
, resize_inode
, group
);
1083 err
= add_new_gdb(handle
, resize_inode
, group
);
1091 * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg
1093 static int ext4_setup_new_descs(handle_t
*handle
, struct super_block
*sb
,
1094 struct ext4_new_flex_group_data
*flex_gd
)
1096 struct ext4_new_group_data
*group_data
= flex_gd
->groups
;
1097 struct ext4_group_desc
*gdp
;
1098 struct ext4_sb_info
*sbi
= EXT4_SB(sb
);
1099 struct buffer_head
*gdb_bh
;
1101 __u16
*bg_flags
= flex_gd
->bg_flags
;
1102 int i
, gdb_off
, gdb_num
, err
= 0;
1105 for (i
= 0; i
< flex_gd
->count
; i
++, group_data
++, bg_flags
++) {
1106 group
= group_data
->group
;
1108 gdb_off
= group
% EXT4_DESC_PER_BLOCK(sb
);
1109 gdb_num
= group
/ EXT4_DESC_PER_BLOCK(sb
);
1112 * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
1114 gdb_bh
= sbi
->s_group_desc
[gdb_num
];
1115 /* Update group descriptor block for new group */
1116 gdp
= (struct ext4_group_desc
*)((char *)gdb_bh
->b_data
+
1117 gdb_off
* EXT4_DESC_SIZE(sb
));
1119 memset(gdp
, 0, EXT4_DESC_SIZE(sb
));
1120 ext4_block_bitmap_set(sb
, gdp
, group_data
->block_bitmap
);
1121 ext4_inode_bitmap_set(sb
, gdp
, group_data
->inode_bitmap
);
1122 ext4_inode_table_set(sb
, gdp
, group_data
->inode_table
);
1123 ext4_free_group_clusters_set(sb
, gdp
,
1124 EXT4_B2C(sbi
, group_data
->free_blocks_count
));
1125 ext4_free_inodes_set(sb
, gdp
, EXT4_INODES_PER_GROUP(sb
));
1126 gdp
->bg_flags
= cpu_to_le16(*bg_flags
);
1127 gdp
->bg_checksum
= ext4_group_desc_csum(sbi
, group
, gdp
);
1129 err
= ext4_handle_dirty_metadata(handle
, NULL
, gdb_bh
);
1130 if (unlikely(err
)) {
1131 ext4_std_error(sb
, err
);
1136 * We can allocate memory for mb_alloc based on the new group
1139 err
= ext4_mb_add_groupinfo(sb
, group
, gdp
);
1147 * ext4_update_super() updates the super block so that the newly added
1148 * groups can be seen by the filesystem.
1151 * @flex_gd: new added groups
 *
 * Fold the block/inode counts of a whole flex-group batch into the
 * superblock in one pass: grow s_blocks_count / s_inodes_count, bump
 * s_groups_count, then update the percpu free-space counters and (if
 * FLEX_BG is enabled) the per-flex-group atomics.
 *
 * NOTE(review): this chunk is a lossy extraction — the opening brace,
 * the `int i;` declaration, the smp_wmb() calls that the barrier
 * comments below refer to, and the tail of the le32_add_cpu() call
 * (the `flex_gd->count` multiplier) are missing. Verify against
 * upstream fs/ext4/resize.c before trusting line-level behavior.
1153 static void ext4_update_super(struct super_block
*sb
,
1154 struct ext4_new_flex_group_data
*flex_gd
)
1156 ext4_fsblk_t blocks_count
= 0;
1157 ext4_fsblk_t free_blocks
= 0;
1158 ext4_fsblk_t reserved_blocks
= 0;
1159 struct ext4_new_group_data
*group_data
= flex_gd
->groups
;
1160 struct ext4_sb_info
*sbi
= EXT4_SB(sb
);
1161 struct ext4_super_block
*es
= sbi
->s_es
;
/* An empty batch is a caller bug, not a recoverable condition. */
1164 BUG_ON(flex_gd
->count
== 0 || group_data
== NULL
);
1166 * Make the new blocks and inodes valid next. We do this before
1167 * increasing the group count so that once the group is enabled,
1168 * all of its blocks and inodes are already valid.
1170 * We always allocate group-by-group, then block-by-block or
1171 * inode-by-inode within a group, so enabling these
1172 * blocks/inodes before the group is live won't actually let us
1173 * allocate the new space yet.
/* Sum total and free blocks over every group in the batch. */
1175 for (i
= 0; i
< flex_gd
->count
; i
++) {
1176 blocks_count
+= group_data
[i
].blocks_count
;
1177 free_blocks
+= group_data
[i
].free_blocks_count
;
/*
 * Scale the reserved-block count by the existing reserved ratio:
 * reserved_blocks = blocks_count * (r_blocks / total_blocks),
 * computed with do_div() since these are 64-bit on 32-bit hosts.
 */
1180 reserved_blocks
= ext4_r_blocks_count(es
) * 100;
1181 do_div(reserved_blocks
, ext4_blocks_count(es
));
1182 reserved_blocks
*= blocks_count
;
1183 do_div(reserved_blocks
, 100);
/* Grow the on-disk totals before the groups become visible. */
1185 ext4_blocks_count_set(es
, ext4_blocks_count(es
) + blocks_count
);
1186 le32_add_cpu(&es
->s_inodes_count
, EXT4_INODES_PER_GROUP(sb
) *
1190 * We need to protect s_groups_count against other CPUs seeing
1191 * inconsistent state in the superblock.
1193 * The precise rules we use are:
1195 * * Writers must perform a smp_wmb() after updating all
1196 * dependent data and before modifying the groups count
1198 * * Readers must perform an smp_rmb() after reading the groups
1199 * count and before reading any dependent data.
1201 * NB. These rules can be relaxed when checking the group count
1202 * while freeing data, as we can only allocate from a block
1203 * group after serialising against the group count, and we can
1204 * only then free after serialising in turn against that
1209 /* Update the global fs size fields */
1210 sbi
->s_groups_count
+= flex_gd
->count
;
1212 /* Update the reserved block counts only once the new group is
1214 ext4_r_blocks_count_set(es
, ext4_r_blocks_count(es
) +
1217 /* Update the free space counts */
1218 percpu_counter_add(&sbi
->s_freeclusters_counter
,
1219 EXT4_B2C(sbi
, free_blocks
));
1220 percpu_counter_add(&sbi
->s_freeinodes_counter
,
1221 EXT4_INODES_PER_GROUP(sb
) * flex_gd
->count
);
/*
 * With FLEX_BG, free-space accounting is also tracked per flex
 * group; all new groups land in the flex group of group_data[0]
 * (the batch is built to stay within one flex group).
 */
1223 if (EXT4_HAS_INCOMPAT_FEATURE(sb
,
1224 EXT4_FEATURE_INCOMPAT_FLEX_BG
) &&
1225 sbi
->s_log_groups_per_flex
) {
1226 ext4_group_t flex_group
;
1227 flex_group
= ext4_flex_group(sbi
, group_data
[0].group
);
1228 atomic_add(EXT4_B2C(sbi
, free_blocks
),
1229 &sbi
->s_flex_groups
[flex_group
].free_clusters
);
1230 atomic_add(EXT4_INODES_PER_GROUP(sb
) * flex_gd
->count
,
1231 &sbi
->s_flex_groups
[flex_group
].free_inodes
);
/* Debug-mount only: report what was added. */
1234 if (test_opt(sb
, DEBUG
))
1235 printk(KERN_DEBUG
"EXT4-fs: added group %u:"
1236 "%llu blocks(%llu free %llu reserved)\n", flex_gd
->count
,
1237 blocks_count
, free_blocks
, reserved_blocks
);
1240 /* Add group descriptor data to an existing or new group descriptor block.
1241 * Ensure we handle all possible error conditions _before_ we start modifying
1242 * the filesystem, because we cannot abort the transaction and not have it
1243 * write the data to disk.
1245 * If we are on a GDT block boundary, we need to get the reserved GDT block.
1246 * Otherwise, we may need to add backup GDT blocks for a sparse group.
1248 * We only need to hold the superblock lock while we are actually adding
1249 * in the new group's counts to the superblock. Prior to that we have
1250 * not really "added" the group at all. We re-check that we are still
1251 * adding in the last group in case things have changed since verifying.
 *
 * NOTE(review): lossy extraction — goto/error labels (exit_journal,
 * exit_put, etc.), several `return -E...;` lines, the ext4_warning for
 * the "No reserved GDT blocks" case, and lock/unlock calls are missing
 * from this chunk. Verify against upstream fs/ext4/resize.c.
1253 int ext4_group_add(struct super_block
*sb
, struct ext4_new_group_data
*input
)
1255 struct ext4_sb_info
*sbi
= EXT4_SB(sb
);
1256 struct ext4_super_block
*es
= sbi
->s_es
;
/* Backup-group descriptors only exist where this group has a sb copy. */
1257 int reserved_gdb
= ext4_bg_has_super(sb
, input
->group
) ?
1258 le16_to_cpu(es
->s_reserved_gdt_blocks
) : 0;
1259 struct buffer_head
*primary
= NULL
;
1260 struct ext4_group_desc
*gdp
;
1261 struct inode
*inode
= NULL
;
1263 int gdb_off
, gdb_num
;
/* Locate the descriptor block (gdb_num) and slot within it (gdb_off). */
1266 gdb_num
= input
->group
/ EXT4_DESC_PER_BLOCK(sb
);
1267 gdb_off
= input
->group
% EXT4_DESC_PER_BLOCK(sb
);
/*
 * Without SPARSE_SUPER, adding a group that starts a new descriptor
 * block is not supported.
 */
1269 if (gdb_off
== 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb
,
1270 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER
)) {
1271 ext4_warning(sb
, "Can't resize non-sparse filesystem further");
/* Reject additions that would wrap the 64-bit block count. */
1275 if (ext4_blocks_count(es
) + input
->blocks_count
<
1276 ext4_blocks_count(es
)) {
1277 ext4_warning(sb
, "blocks_count overflow");
/* Reject additions that would wrap the 32-bit inode count. */
1281 if (le32_to_cpu(es
->s_inodes_count
) + EXT4_INODES_PER_GROUP(sb
) <
1282 le32_to_cpu(es
->s_inodes_count
)) {
1283 ext4_warning(sb
, "inodes_count overflow");
/*
 * Any path that touches reserved GDT blocks or starts a new GDT
 * block needs the resize inode (RESIZE_INODE feature).
 */
1287 if (reserved_gdb
|| gdb_off
== 0) {
1288 if (!EXT4_HAS_COMPAT_FEATURE(sb
,
1289 EXT4_FEATURE_COMPAT_RESIZE_INODE
)
1290 || !le16_to_cpu(es
->s_reserved_gdt_blocks
)) {
1292 "No reserved GDT blocks, can't resize");
1295 inode
= ext4_iget(sb
, EXT4_RESIZE_INO
);
1296 if (IS_ERR(inode
)) {
1297 ext4_warning(sb
, "Error opening resize inode");
1298 return PTR_ERR(inode
);
/* Validate the caller-supplied layout, then zero/init the new blocks. */
1303 if ((err
= verify_group_input(sb
, input
)))
1306 if ((err
= setup_new_group_blocks(sb
, input
)))
1310 * We will always be modifying at least the superblock and a GDT
1311 * block. If we are adding a group past the last current GDT block,
1312 * we will also modify the inode and the dindirect block. If we
1313 * are adding a group with superblock/GDT backups we will also
1314 * modify each of the reserved GDT dindirect blocks.
1316 handle
= ext4_journal_start_sb(sb
,
1317 ext4_bg_has_super(sb
, input
->group
) ?
1318 3 + reserved_gdb
: 4);
1319 if (IS_ERR(handle
)) {
1320 err
= PTR_ERR(handle
);
/* Superblock buffer joins the transaction before any modification. */
1324 if ((err
= ext4_journal_get_write_access(handle
, sbi
->s_sbh
)))
1328 * We will only either add reserved group blocks to a backup group
1329 * or remove reserved blocks for the first group in a new group block.
1330 * Doing both would be mean more complex code, and sane people don't
1331 * use non-sparse filesystems anymore. This is already checked above.
1334 primary
= sbi
->s_group_desc
[gdb_num
];
1335 if ((err
= ext4_journal_get_write_access(handle
, primary
)))
/* Group with backups: wire the reserved GDT blocks into the inode. */
1338 if (reserved_gdb
&& ext4_bg_num_gdb(sb
, input
->group
)) {
1339 err
= reserve_backup_gdb(handle
, inode
, input
->group
);
1345 * Note that we can access new group descriptor block safely
1346 * only if add_new_gdb() succeeds.
1348 err
= add_new_gdb(handle
, inode
, input
->group
);
/* add_new_gdb() may have replaced the cached descriptor buffer. */
1351 primary
= sbi
->s_group_desc
[gdb_num
];
1355 * OK, now we've set up the new group. Time to make it active.
1357 * so we have to be safe wrt. concurrent accesses the group
1358 * data. So we need to be careful to set all of the relevant
1359 * group descriptor data etc. *before* we enable the group.
1361 * The key field here is sbi->s_groups_count: as long as
1362 * that retains its old value, nobody is going to access the new
1365 * So first we update all the descriptor metadata for the new
1366 * group; then we update the total disk blocks count; then we
1367 * update the groups count to enable the group; then finally we
1368 * update the free space counts so that the system can start
1369 * using the new disk blocks.
1372 /* Update group descriptor block for new group */
1373 gdp
= (struct ext4_group_desc
*)((char *)primary
->b_data
+
1374 gdb_off
* EXT4_DESC_SIZE(sb
));
1376 memset(gdp
, 0, EXT4_DESC_SIZE(sb
));
1377 ext4_block_bitmap_set(sb
, gdp
, input
->block_bitmap
); /* LV FIXME */
1378 ext4_inode_bitmap_set(sb
, gdp
, input
->inode_bitmap
); /* LV FIXME */
1379 ext4_inode_table_set(sb
, gdp
, input
->inode_table
); /* LV FIXME */
1380 ext4_free_group_clusters_set(sb
, gdp
, input
->free_blocks_count
);
1381 ext4_free_inodes_set(sb
, gdp
, EXT4_INODES_PER_GROUP(sb
));
1382 gdp
->bg_flags
= cpu_to_le16(EXT4_BG_INODE_ZEROED
);
1383 gdp
->bg_checksum
= ext4_group_desc_csum(sbi
, input
->group
, gdp
);
1386 * We can allocate memory for mb_alloc based on the new group
/* Register the group with the mballoc allocator. */
1389 err
= ext4_mb_add_groupinfo(sb
, input
->group
, gdp
);
1394 * Make the new blocks and inodes valid next. We do this before
1395 * increasing the group count so that once the group is enabled,
1396 * all of its blocks and inodes are already valid.
1398 * We always allocate group-by-group, then block-by-block or
1399 * inode-by-inode within a group, so enabling these
1400 * blocks/inodes before the group is live won't actually let us
1401 * allocate the new space yet.
1403 ext4_blocks_count_set(es
, ext4_blocks_count(es
) +
1404 input
->blocks_count
);
1405 le32_add_cpu(&es
->s_inodes_count
, EXT4_INODES_PER_GROUP(sb
));
1408 * We need to protect s_groups_count against other CPUs seeing
1409 * inconsistent state in the superblock.
1411 * The precise rules we use are:
1413 * * Writers must perform a smp_wmb() after updating all dependent
1414 * data and before modifying the groups count
1416 * * Readers must perform an smp_rmb() after reading the groups count
1417 * and before reading any dependent data.
1419 * NB. These rules can be relaxed when checking the group count
1420 * while freeing data, as we can only allocate from a block
1421 * group after serialising against the group count, and we can
1422 * only then free after serialising in turn against that
1427 /* Update the global fs size fields */
1428 sbi
->s_groups_count
++;
/* Flush the filled-in descriptor through the journal. */
1430 err
= ext4_handle_dirty_metadata(handle
, NULL
, primary
);
1431 if (unlikely(err
)) {
1432 ext4_std_error(sb
, err
);
1436 /* Update the reserved block counts only once the new group is
1438 ext4_r_blocks_count_set(es
, ext4_r_blocks_count(es
) +
1439 input
->reserved_blocks
);
1441 /* Update the free space counts */
1442 percpu_counter_add(&sbi
->s_freeclusters_counter
,
1443 EXT4_B2C(sbi
, input
->free_blocks_count
));
1444 percpu_counter_add(&sbi
->s_freeinodes_counter
,
1445 EXT4_INODES_PER_GROUP(sb
));
/* Mirror the additions in the per-flex-group counters when enabled. */
1447 if (EXT4_HAS_INCOMPAT_FEATURE(sb
, EXT4_FEATURE_INCOMPAT_FLEX_BG
) &&
1448 sbi
->s_log_groups_per_flex
) {
1449 ext4_group_t flex_group
;
1450 flex_group
= ext4_flex_group(sbi
, input
->group
);
1451 atomic_add(EXT4_B2C(sbi
, input
->free_blocks_count
),
1452 &sbi
->s_flex_groups
[flex_group
].free_clusters
);
1453 atomic_add(EXT4_INODES_PER_GROUP(sb
),
1454 &sbi
->s_flex_groups
[flex_group
].free_inodes
);
1457 ext4_handle_dirty_super(handle
, sb
);
/* Close the transaction; keep the first error seen. */
1460 if ((err2
= ext4_journal_stop(handle
)) && !err
)
/* On success, propagate sb + descriptor block to the backup groups. */
1462 if (!err
&& primary
) {
1463 update_backups(sb
, sbi
->s_sbh
->b_blocknr
, (char *)es
,
1464 sizeof(struct ext4_super_block
));
1465 update_backups(sb
, primary
->b_blocknr
, primary
->b_data
,
1471 } /* ext4_group_add */
1474 * extend a group without checking assuming that checking has been done.
 *
 * Grows the last group by `add` blocks starting at o_blocks_count:
 * bump the superblock block count, then hand the new range to
 * ext4_group_add_blocks() inside a small journal transaction, and
 * finally refresh the superblock backups.
 *
 * NOTE(review): lossy extraction — the `handle_t *handle` / `int err,
 * err2` declarations, `goto`/`return` lines in the error branches, and
 * the closing braces are missing from this chunk. Verify against
 * upstream fs/ext4/resize.c.
1476 static int ext4_group_extend_no_check(struct super_block
*sb
,
1477 ext4_fsblk_t o_blocks_count
, ext4_grpblk_t add
)
1479 struct ext4_super_block
*es
= EXT4_SB(sb
)->s_es
;
1483 /* We will update the superblock, one block bitmap, and
1484 * one group descriptor via ext4_group_add_blocks().
1486 handle
= ext4_journal_start_sb(sb
, 3);
1487 if (IS_ERR(handle
)) {
1488 err
= PTR_ERR(handle
);
1489 ext4_warning(sb
, "error %d on journal start", err
);
/* Superblock buffer must join the transaction before being modified. */
1493 err
= ext4_journal_get_write_access(handle
, EXT4_SB(sb
)->s_sbh
);
1495 ext4_warning(sb
, "error %d on journal write access", err
);
/* Extend the on-disk size first; the new range is still unusable. */
1499 ext4_blocks_count_set(es
, o_blocks_count
+ add
);
1500 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count
,
1501 o_blocks_count
+ add
);
1502 /* We add the blocks to the bitmap and set the group need init bit */
1503 err
= ext4_group_add_blocks(handle
, sb
, o_blocks_count
, add
);
1506 ext4_handle_dirty_super(handle
, sb
);
1507 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count
,
1508 o_blocks_count
+ add
);
1510 err2
= ext4_journal_stop(handle
);
/* Debug-mount only: report the new filesystem size. */
1515 if (test_opt(sb
, DEBUG
))
1516 printk(KERN_DEBUG
"EXT4-fs: extended group to %llu "
1517 "blocks\n", ext4_blocks_count(es
));
/* Propagate the grown superblock to all backup locations. */
1518 update_backups(sb
, EXT4_SB(sb
)->s_sbh
->b_blocknr
, (char *)es
,
1519 sizeof(struct ext4_super_block
));
1525 * Extend the filesystem to the new number of blocks specified. This entry
1526 * point is only used to extend the current filesystem to the end of the last
1527 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
1528 * for emergencies (because it has no dependencies on reserved blocks).
1530 * If we _really_ wanted, we could use default values to call ext4_group_add()
1531 * allow the "remount" trick to work for arbitrary resizing, assuming enough
1532 * GDT blocks are reserved to grow to the desired size.
 *
 * NOTE(review): lossy extraction — `handle_t *handle`, `ext4_group_t
 * group`, `ext4_grpblk_t last/add` declarations, the `if (!bh)` test
 * after sb_bread(), `brelse(bh)`, several `return`/`goto` lines and
 * closing braces are missing from this chunk. Verify against upstream
 * fs/ext4/resize.c.
1534 int ext4_group_extend(struct super_block
*sb
, struct ext4_super_block
*es
,
1535 ext4_fsblk_t n_blocks_count
)
1537 ext4_fsblk_t o_blocks_count
;
1540 struct buffer_head
*bh
;
1545 o_blocks_count
= ext4_blocks_count(es
);
1547 if (test_opt(sb
, DEBUG
))
1548 printk(KERN_DEBUG
"EXT4-fs: extending last group from %llu to %llu blocks\n",
1549 o_blocks_count
, n_blocks_count
);
/* Nothing to do: zero target, or already the requested size. */
1551 if (n_blocks_count
== 0 || n_blocks_count
== o_blocks_count
)
/*
 * Refuse sizes the block layer can't address: sector_t is in
 * 512-byte units, so max blocks = ~0 sectors >> (blockbits - 9).
 */
1554 if (n_blocks_count
> (sector_t
)(~0ULL) >> (sb
->s_blocksize_bits
- 9)) {
1555 printk(KERN_ERR
"EXT4-fs: filesystem on %s:"
1556 " too large to resize to %llu blocks safely\n",
1557 sb
->s_id
, n_blocks_count
);
1558 if (sizeof(sector_t
) < 8)
1559 ext4_warning(sb
, "CONFIG_LBDAF not enabled");
/* This entry point can only grow, never shrink. */
1563 if (n_blocks_count
< o_blocks_count
) {
1564 ext4_warning(sb
, "can't shrink FS - resize aborted");
1568 /* Handle the remaining blocks in the last group only. */
1569 ext4_get_group_no_and_offset(sb
, o_blocks_count
, &group
, &last
);
/* last == 0 means the last group is already full; can't extend it. */
1572 ext4_warning(sb
, "need to use ext2online to resize further");
/* Blocks still missing from the (partial) last group. */
1576 add
= EXT4_BLOCKS_PER_GROUP(sb
) - last
;
1578 if (o_blocks_count
+ add
< o_blocks_count
) {
1579 ext4_warning(sb
, "blocks_count overflow");
/* Don't grow past what the caller asked for. */
1583 if (o_blocks_count
+ add
> n_blocks_count
)
1584 add
= n_blocks_count
- o_blocks_count
;
1586 if (o_blocks_count
+ add
< n_blocks_count
)
1587 ext4_warning(sb
, "will only finish group (%llu blocks, %u new)",
1588 o_blocks_count
+ add
, add
);
1590 /* See if the device is actually as big as what was requested */
1591 bh
= sb_bread(sb
, o_blocks_count
+ add
- 1);
1593 ext4_warning(sb
, "can't read last block, resize aborted");
1598 /* We will update the superblock, one block bitmap, and
1599 * one group descriptor via ext4_free_blocks().
1601 handle
= ext4_journal_start_sb(sb
, 3);
1602 if (IS_ERR(handle
)) {
1603 err
= PTR_ERR(handle
);
1604 ext4_warning(sb
, "error %d on journal start", err
);
1608 if ((err
= ext4_journal_get_write_access(handle
,
1609 EXT4_SB(sb
)->s_sbh
))) {
1610 ext4_warning(sb
, "error %d on journal write access", err
);
1611 ext4_journal_stop(handle
);
/* Commit the new size, then hand the fresh range to the allocator. */
1614 ext4_blocks_count_set(es
, o_blocks_count
+ add
);
1615 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count
,
1616 o_blocks_count
+ add
);
1617 /* We add the blocks to the bitmap and set the group need init bit */
1618 err
= ext4_group_add_blocks(handle
, sb
, o_blocks_count
, add
);
1619 ext4_handle_dirty_super(handle
, sb
);
1620 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count
,
1621 o_blocks_count
+ add
);
1622 err2
= ext4_journal_stop(handle
);
1629 if (test_opt(sb
, DEBUG
))
1630 printk(KERN_DEBUG
"EXT4-fs: extended group to %llu blocks\n",
1631 ext4_blocks_count(es
));
/* Refresh the superblock copies kept in the backup groups. */
1632 update_backups(sb
, EXT4_SB(sb
)->s_sbh
->b_blocknr
, (char *)es
,
1633 sizeof(struct ext4_super_block
));
1636 } /* ext4_group_extend */