/*
 * linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;

}
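
/*
 * Illustrative example (hypothetical layout, not taken from any real
 * filesystem): on a non-bigalloc filesystem with s_first_data_block == 0
 * and EXT4_BLOCKS_PER_GROUP(sb) == 32768, block 40000 gives
 * do_div(40000, 32768) -> quotient 1, remainder 7232, so *blockgrpp == 1
 * and *offsetp == 7232.
 */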

static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t actual_group;
	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	if (actual_group == block_group)
		return 1;
	return 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
unsigned ext4_num_overhead_clusters(struct super_block *sb,
				    ext4_group_t block_group,
				    struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
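	/*
	 * Illustrative example (hypothetical bigalloc layout, not taken
	 * from a real filesystem): if the base metadata occupies clusters
	 * 0-4 (num_clusters == 5) and the block bitmap falls in cluster 3,
	 * it is already counted; if it falls in cluster 5, num_clusters is
	 * simply bumped to 6; only a bitmap placed further away needs the
	 * explicit block_cluster tracking below.
	 */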
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}

static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, some other tool may have been used, so
		 * make sure we calculate the correct number of free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}
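
/*
 * Illustrative example (hypothetical geometry): with 256 blocks per group,
 * s_first_data_block == 0 and 1000 blocks in total, the last group starts
 * at block 768, so num_clusters_in_group() returns 1000 - 768 = 232 blocks
 * (converted to clusters by EXT4_NUM_B2C) rather than the full 256.
 */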

/* Initializes an uninitialized block bitmap */
void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
			    ext4_group_t block_group,
			    struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If the checksum is bad, mark all blocks used to prevent allocation,
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
		return;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * blocksize * 8 (which is the size of the bitmap), set the rest
	 * of the block bitmap to 1
	 */
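	/*
	 * Hypothetical example: with 4 KiB blocks the bitmap spans
	 * 4096 * 8 = 32768 bits; if this group only contains 24000
	 * clusters, bits 24000..32767 are forced to 1 here so the
	 * allocator can never hand them out.
	 */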
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
}

/* Return the number of free clusters in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded into memory
 * when a file system is mounted (see ext4_fill_super).
 */
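
/*
 * Rough on-disk layout of one block group in the traditional (non-flex_bg,
 * non-meta_bg) case, shown purely as an illustration:
 *
 *	[superblock backup][group descriptors][reserved GDT blocks]
 *	[block bitmap][inode bitmap][inode table][data blocks ...]
 *
 * Groups without a sparse_super backup simply start at the block bitmap.
 */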

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
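	/*
	 * Worked example (hypothetical geometry): with 4 KiB blocks and
	 * 64-byte descriptors, EXT4_DESC_PER_BLOCK(sb) is 64, so block
	 * group 200 lives in descriptor block 200 >> 6 = 3, at index
	 * 200 & 63 = 8 within that block.
	 */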
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    unsigned int block_group,
					    struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* With FLEX_BG, the inode/block bitmaps and the inode table
		 * may not be in the group at all, so bitmap validation is
		 * skipped for those groups; verifying them would require
		 * also reading the block group where the bitmaps are
		 * actually located to check that their bits are set.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
			offset + EXT4_SB(sb)->s_itb_per_group,
			offset);
	if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

void ext4_validate_block_bitmap(struct super_block *sb,
				struct ext4_group_desc *desc,
				unsigned int block_group,
				struct buffer_head *bh)
{
	ext4_fsblk_t blk;

	if (buffer_verified(bh))
		return;

	ext4_lock_group(sb, block_group);
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		return;
	}
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
			desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		return;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode bitmaps and inode table are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
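	/*
	 * Re-check under the buffer lock: another task may have read or
	 * initialized the bitmap while we were waiting for the lock, in
	 * which case there is nothing left to do here.
	 */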
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit, and bh is uptodate, then the
		 * bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ, bh);
	return bh;
verify:
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	return bh;
}

/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return 1;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return 1;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	return 0;
}

struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (!bh)
		return NULL;
	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}
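
/*
 * Note that ext4_read_block_bitmap() is simply the synchronous combination
 * of the two helpers above; a caller wanting to overlap I/O for several
 * groups can issue multiple _nowait reads first and only then wait on each
 * buffer.
 */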

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, on failure return 0.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, root_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);

	/*
	 * r_blocks_count should always be a multiple of the cluster ratio so
	 * we are safe to do a plain bit shift only.
	 */
	root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;

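	/*
	 * The per-cpu reads above are only approximate.  If the result is
	 * close enough to the request that the error margin matters (the
	 * EXT4_FREECLUSTERS_WATERMARK check below), fall back to the exact
	 * but more expensive percpu_counter_sum_positive() totals before
	 * deciding.
	 */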
	if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= ((root_clusters + nclusters) + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}

int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
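
/*
 * Typical caller pattern (a sketch, not code from this file): a caller that
 * hits ENOSPC keeps a local retry counter and loops roughly as
 *
 *	int retries = 0;
 * retry:
 *	err = <allocate blocks>;
 *	if (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
 *		goto retry;
 *
 * so only a few attempts are made, each one waiting for a journal commit
 * that may have returned freed blocks to the bitmaps.
 */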

/*
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
 *
 * @handle:	handle to this transaction
 * @inode:	file inode
 * @goal:	given target block (filesystem wide)
 * @count:	pointer to total number of clusters needed
 * @errp:	error code
 *
 * Return the first allocated block number on success; *count is updated
 * with the allocated count, and any error is returned via *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode,
			EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_BLOCKS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
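
/*
 * With sparse_super, only group 0, group 1 and groups whose number is a
 * power of 3, 5 or 7 carry superblock/GDT backups, e.g. groups
 * 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, ... (assuming a filesystem large
 * enough to have that many groups).
 */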

/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);

}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}

/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take the
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

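	/*
	 * Illustrative example (hypothetical numbers): with 32768 blocks
	 * per group, the 16 PID "colour" slots below are 2048 blocks wide,
	 * so a task with pid 4105 (4105 % 16 == 9) gets a goal 18432 blocks
	 * into the group, spreading concurrent writers across the group.
	 */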
	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}