[GFS2] Fix unlinked file handling
fs/gfs2/rgrp.c
1 /*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
8 */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/fs.h>
16 #include <linux/gfs2_ondisk.h>
17
18 #include "gfs2.h"
19 #include "lm_interface.h"
20 #include "incore.h"
21 #include "glock.h"
22 #include "glops.h"
23 #include "lops.h"
24 #include "meta_io.h"
25 #include "quota.h"
26 #include "rgrp.h"
27 #include "super.h"
28 #include "trans.h"
29 #include "ops_file.h"
30 #include "util.h"
31
32 #define BFITNOENT 0xFFFFFFFF
33
34 /*
35 * These routines are used by the resource group routines (rgrp.c)
36 * to keep track of block allocation. Each block is represented by two
37 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
38 *
39 * 0 = Free
40 * 1 = Used (not metadata)
41 * 2 = Unlinked (still in use) inode
42 * 3 = Used (metadata)
43 */
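
/*
 * Editor's sketch (not part of the original file, compiled out): how a
 * single bitmap byte packs four two-bit block states, assuming the usual
 * gfs2_ondisk.h values GFS2_NBBY == 4, GFS2_BIT_SIZE == 2 and
 * GFS2_BIT_MASK == 0x3. It mirrors the arithmetic used by gfs2_setbit()
 * and gfs2_testbit() below; example_block_state() is a hypothetical helper.
 */
#if 0
static unsigned char example_block_state(const unsigned char *bitmap,
					 uint32_t block)
{
	/* four blocks per byte, two bits per block, LSB-first in the byte */
	unsigned char byte = bitmap[block / GFS2_NBBY];
	unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (byte >> bit) & GFS2_BIT_MASK;	/* 0..3 as in the list above */
}
#endif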
44
45 static const char valid_change[16] = {
46 /* current */
47 /* n */ 0, 1, 1, 1,
48 /* e */ 1, 0, 0, 0,
49 /* w */ 0, 0, 0, 1,
50 1, 0, 0, 0
51 };
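
/*
 * Editor's note (illustrative, compiled out): gfs2_setbit() below indexes
 * this table as valid_change[new_state * 4 + cur_state], so rows are the
 * requested new state and columns the current state. A few examples,
 * assuming the usual GFS2_BLKST_* values (FREE=0, USED=1, UNLINKED=2,
 * DINODE=3); example_valid_change() is a hypothetical helper.
 */
#if 0
static void example_valid_change(void)
{
	/* allocation: free -> used and free -> dinode are permitted */
	BUG_ON(!valid_change[GFS2_BLKST_USED * 4 + GFS2_BLKST_FREE]);
	BUG_ON(!valid_change[GFS2_BLKST_DINODE * 4 + GFS2_BLKST_FREE]);
	/* only a dinode may be marked unlinked */
	BUG_ON(!valid_change[GFS2_BLKST_UNLINKED * 4 + GFS2_BLKST_DINODE]);
	/* a plain data block never turns directly into a dinode */
	BUG_ON(valid_change[GFS2_BLKST_DINODE * 4 + GFS2_BLKST_USED]);
}
#endif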
52
53 /**
54 * gfs2_setbit - Set a bit in the bitmaps
55 * @buffer: the buffer that holds the bitmaps
56 * @buflen: the length (in bytes) of the buffer
57 * @block: the block to set
58 * @new_state: the new state of the block
59 *
60 */
61
62 static void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
63 unsigned int buflen, uint32_t block,
64 unsigned char new_state)
65 {
66 unsigned char *byte, *end, cur_state;
67 unsigned int bit;
68
69 byte = buffer + (block / GFS2_NBBY);
70 bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
71 end = buffer + buflen;
72
73 gfs2_assert(rgd->rd_sbd, byte < end);
74
75 cur_state = (*byte >> bit) & GFS2_BIT_MASK;
76
77 if (valid_change[new_state * 4 + cur_state]) {
78 *byte ^= cur_state << bit;
79 *byte |= new_state << bit;
80 } else
81 gfs2_consist_rgrpd(rgd);
82 }
83
84 /**
85 * gfs2_testbit - test a bit in the bitmaps
86 * @buffer: the buffer that holds the bitmaps
87 * @buflen: the length (in bytes) of the buffer
88 * @block: the block to read
89  * Returns: the two-bit allocation state of the block
90 */
91
92 static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
93 unsigned int buflen, uint32_t block)
94 {
95 unsigned char *byte, *end, cur_state;
96 unsigned int bit;
97
98 byte = buffer + (block / GFS2_NBBY);
99 bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
100 end = buffer + buflen;
101
102 gfs2_assert(rgd->rd_sbd, byte < end);
103
104 cur_state = (*byte >> bit) & GFS2_BIT_MASK;
105
106 return cur_state;
107 }
108
109 /**
110 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
111 * a block in a given allocation state.
112 * @buffer: the buffer that holds the bitmaps
113 * @buflen: the length (in bytes) of the buffer
114 * @goal: start search at this block's bit-pair (within @buffer)
115 * @old_state: GFS2_BLKST_XXX the state of the block we're looking for;
116 * bit 0 = alloc(1)/free(0), bit 1 = meta(1)/data(0)
117 *
118 * Scope of @goal and returned block number is only within this bitmap buffer,
119 * not entire rgrp or filesystem. @buffer will be offset from the actual
120 * beginning of a bitmap block buffer, skipping any header structures.
121 *
122 * Return: the block number (bitmap buffer scope) that was found
123 */
124
125 static uint32_t gfs2_bitfit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
126 unsigned int buflen, uint32_t goal,
127 unsigned char old_state)
128 {
129 unsigned char *byte, *end, alloc;
130 uint32_t blk = goal;
131 unsigned int bit;
132
133 byte = buffer + (goal / GFS2_NBBY);
134 bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
135 end = buffer + buflen;
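	/*
	 * Editor's note: 0x55 is 01010101b, i.e. bit 0 of all four two-bit
	 * entries in a byte. If we are hunting for a state with bit 0 clear
	 * (free or unlinked), a byte whose (*byte & 0x55) == 0x55 has every
	 * entry allocated and can be skipped in one step; if we want a state
	 * with bit 0 set (used or dinode), a byte with no bit-0 bits set can
	 * be skipped the same way.
	 */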
136 alloc = (old_state & 1) ? 0 : 0x55;
137
138 while (byte < end) {
139 if ((*byte & 0x55) == alloc) {
140 blk += (8 - bit) >> 1;
141
142 bit = 0;
143 byte++;
144
145 continue;
146 }
147
148 if (((*byte >> bit) & GFS2_BIT_MASK) == old_state)
149 return blk;
150
151 bit += GFS2_BIT_SIZE;
152 if (bit >= 8) {
153 bit = 0;
154 byte++;
155 }
156
157 blk++;
158 }
159
160 return BFITNOENT;
161 }
162
163 /**
164 * gfs2_bitcount - count the number of bits in a certain state
165 * @buffer: the buffer that holds the bitmaps
166 * @buflen: the length (in bytes) of the buffer
167 * @state: the state of the block we're looking for
168 *
169 * Returns: The number of bits
170 */
171
172 static uint32_t gfs2_bitcount(struct gfs2_rgrpd *rgd, unsigned char *buffer,
173 unsigned int buflen, unsigned char state)
174 {
175 unsigned char *byte = buffer;
176 unsigned char *end = buffer + buflen;
177 unsigned char state1 = state << 2;
178 unsigned char state2 = state << 4;
179 unsigned char state3 = state << 6;
180 uint32_t count = 0;
181
182 for (; byte < end; byte++) {
183 if (((*byte) & 0x03) == state)
184 count++;
185 if (((*byte) & 0x0C) == state1)
186 count++;
187 if (((*byte) & 0x30) == state2)
188 count++;
189 if (((*byte) & 0xC0) == state3)
190 count++;
191 }
192
193 return count;
194 }
195
196 /**
197 * gfs2_rgrp_verify - Verify that a resource group is consistent
198  * @rgd: the rgrp to verify
200 *
201 */
202
203 void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
204 {
205 struct gfs2_sbd *sdp = rgd->rd_sbd;
206 struct gfs2_bitmap *bi = NULL;
207 uint32_t length = rgd->rd_ri.ri_length;
208 uint32_t count[4], tmp;
209 int buf, x;
210
211 memset(count, 0, 4 * sizeof(uint32_t));
212
213 /* Count # blocks in each of 4 possible allocation states */
214 for (buf = 0; buf < length; buf++) {
215 bi = rgd->rd_bits + buf;
216 for (x = 0; x < 4; x++)
217 count[x] += gfs2_bitcount(rgd,
218 bi->bi_bh->b_data +
219 bi->bi_offset,
220 bi->bi_len, x);
221 }
222
223 if (count[0] != rgd->rd_rg.rg_free) {
224 if (gfs2_consist_rgrpd(rgd))
225 fs_err(sdp, "free data mismatch: %u != %u\n",
226 count[0], rgd->rd_rg.rg_free);
227 return;
228 }
229
230 tmp = rgd->rd_ri.ri_data -
231 rgd->rd_rg.rg_free -
232 rgd->rd_rg.rg_dinodes;
233 if (count[1] + count[2] != tmp) {
234 if (gfs2_consist_rgrpd(rgd))
235 fs_err(sdp, "used data mismatch: %u != %u\n",
236 count[1], tmp);
237 return;
238 }
239
240 if (count[3] != rgd->rd_rg.rg_dinodes) {
241 if (gfs2_consist_rgrpd(rgd))
242 fs_err(sdp, "used metadata mismatch: %u != %u\n",
243 count[3], rgd->rd_rg.rg_dinodes);
244 return;
245 }
246
247 if (count[2] > count[3]) {
248 if (gfs2_consist_rgrpd(rgd))
249 fs_err(sdp, "unlinked inodes > inodes: %u\n",
250 count[2]);
251 return;
252 }
253
254 }
255
256 static inline int rgrp_contains_block(struct gfs2_rindex *ri, uint64_t block)
257 {
258 uint64_t first = ri->ri_data0;
259 uint64_t last = first + ri->ri_data;
260 return !!(first <= block && block < last);
261 }
262
263 /**
264 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
265 * @sdp: The GFS2 superblock
266  * @blk: The data block number
267 *
268 * Returns: The resource group, or NULL if not found
269 */
270
271 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, uint64_t blk)
272 {
273 struct gfs2_rgrpd *rgd;
274
275 spin_lock(&sdp->sd_rindex_spin);
276
277 list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
278 if (rgrp_contains_block(&rgd->rd_ri, blk)) {
279 list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
280 spin_unlock(&sdp->sd_rindex_spin);
281 return rgd;
282 }
283 }
284
285 spin_unlock(&sdp->sd_rindex_spin);
286
287 return NULL;
288 }
289
290 /**
291 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
292 * @sdp: The GFS2 superblock
293 *
294 * Returns: The first rgrp in the filesystem
295 */
296
297 struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
298 {
299 gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
300 return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
301 }
302
303 /**
304 * gfs2_rgrpd_get_next - get the next RG
305 * @rgd: A RG
306 *
307 * Returns: The next rgrp
308 */
309
310 struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
311 {
312 if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
313 return NULL;
314 return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
315 }
316
317 static void clear_rgrpdi(struct gfs2_sbd *sdp)
318 {
319 struct list_head *head;
320 struct gfs2_rgrpd *rgd;
321 struct gfs2_glock *gl;
322
323 spin_lock(&sdp->sd_rindex_spin);
324 sdp->sd_rindex_forward = NULL;
325 head = &sdp->sd_rindex_recent_list;
326 while (!list_empty(head)) {
327 rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
328 list_del(&rgd->rd_recent);
329 }
330 spin_unlock(&sdp->sd_rindex_spin);
331
332 head = &sdp->sd_rindex_list;
333 while (!list_empty(head)) {
334 rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
335 gl = rgd->rd_gl;
336
337 list_del(&rgd->rd_list);
338 list_del(&rgd->rd_list_mru);
339
340 if (gl) {
341 gl->gl_object = NULL;
342 gfs2_glock_put(gl);
343 }
344
345 kfree(rgd->rd_bits);
346 kfree(rgd);
347 }
348 }
349
350 void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
351 {
352 mutex_lock(&sdp->sd_rindex_mutex);
353 clear_rgrpdi(sdp);
354 mutex_unlock(&sdp->sd_rindex_mutex);
355 }
356
357 /**
358  * compute_bitstructs - Compute the bitmap sizes
359 * @rgd: The resource group descriptor
360 *
361 * Calculates bitmap descriptors, one for each block that contains bitmap data
362 *
363 * Returns: errno
364 */
365
366 static int compute_bitstructs(struct gfs2_rgrpd *rgd)
367 {
368 struct gfs2_sbd *sdp = rgd->rd_sbd;
369 struct gfs2_bitmap *bi;
370 uint32_t length = rgd->rd_ri.ri_length; /* # blocks in hdr & bitmap */
371 uint32_t bytes_left, bytes;
372 int x;
373
374 if (!length)
375 return -EINVAL;
376
377 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_KERNEL);
378 if (!rgd->rd_bits)
379 return -ENOMEM;
380
381 bytes_left = rgd->rd_ri.ri_bitbytes;
382
383 for (x = 0; x < length; x++) {
384 bi = rgd->rd_bits + x;
385
386 /* small rgrp; bitmap stored completely in header block */
387 if (length == 1) {
388 bytes = bytes_left;
389 bi->bi_offset = sizeof(struct gfs2_rgrp);
390 bi->bi_start = 0;
391 bi->bi_len = bytes;
392 /* header block */
393 } else if (x == 0) {
394 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
395 bi->bi_offset = sizeof(struct gfs2_rgrp);
396 bi->bi_start = 0;
397 bi->bi_len = bytes;
398 /* last block */
399 } else if (x + 1 == length) {
400 bytes = bytes_left;
401 bi->bi_offset = sizeof(struct gfs2_meta_header);
402 bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
403 bi->bi_len = bytes;
404 /* other blocks */
405 } else {
406 bytes = sdp->sd_sb.sb_bsize -
407 sizeof(struct gfs2_meta_header);
408 bi->bi_offset = sizeof(struct gfs2_meta_header);
409 bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
410 bi->bi_len = bytes;
411 }
412
413 bytes_left -= bytes;
414 }
415
416 if (bytes_left) {
417 gfs2_consist_rgrpd(rgd);
418 return -EIO;
419 }
420 bi = rgd->rd_bits + (length - 1);
421 if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_ri.ri_data) {
422 if (gfs2_consist_rgrpd(rgd)) {
423 gfs2_rindex_print(&rgd->rd_ri);
424 fs_err(sdp, "start=%u len=%u offset=%u\n",
425 bi->bi_start, bi->bi_len, bi->bi_offset);
426 }
427 return -EIO;
428 }
429
430 return 0;
431 }
432
433 /**
434 * gfs2_ri_update - Pull in a new resource index from the disk
435  * @ip: a pointer to the rindex inode
436 *
437 * Returns: 0 on successful update, error code otherwise
438 */
439
440 static int gfs2_ri_update(struct gfs2_inode *ip)
441 {
442 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
443 struct inode *inode = &ip->i_inode;
444 struct gfs2_rgrpd *rgd;
445 char buf[sizeof(struct gfs2_rindex)];
446 struct file_ra_state ra_state;
447 uint64_t junk = ip->i_di.di_size;
448 int error;
449
450 printk(KERN_INFO "gfs2_ri_update inode=%p\n", inode);
451
452 if (do_div(junk, sizeof(struct gfs2_rindex))) {
453 gfs2_consist_inode(ip);
454 return -EIO;
455 }
456
457 clear_rgrpdi(sdp);
458
459 printk(KERN_INFO "rgrps cleared\n");
460
461 file_ra_state_init(&ra_state, inode->i_mapping);
462 for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
463 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
464 printk(KERN_INFO "reading rgrp %d\n", sdp->sd_rgrps);
465 error = gfs2_internal_read(ip, &ra_state, buf, &pos,
466 sizeof(struct gfs2_rindex));
467 if (!error)
468 break;
469 if (error != sizeof(struct gfs2_rindex)) {
470 if (error > 0)
471 error = -EIO;
472 goto fail;
473 }
474
475 rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_KERNEL);
476 error = -ENOMEM;
477 if (!rgd)
478 goto fail;
479
480 mutex_init(&rgd->rd_mutex);
481 lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
482 rgd->rd_sbd = sdp;
483
484 list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
485 list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
486
487 gfs2_rindex_in(&rgd->rd_ri, buf);
488 printk(KERN_INFO "compute bitstructs\n");
489 error = compute_bitstructs(rgd);
490 if (error)
491 goto fail;
492
493 printk(KERN_INFO "gfs2_glock_get\n");
494 error = gfs2_glock_get(sdp, rgd->rd_ri.ri_addr,
495 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
496 printk(KERN_INFO "gfs2_glock_got one\n");
497 if (error)
498 goto fail;
499
500 rgd->rd_gl->gl_object = rgd;
501 rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1;
502 }
503
504 printk(KERN_INFO "ok, finished\n");
505 sdp->sd_rindex_vn = ip->i_gl->gl_vn;
506 return 0;
507
508 fail:
509 printk(KERN_INFO "fail\n");
510 clear_rgrpdi(sdp);
511 printk(KERN_INFO "cleared rgrps\n");
512 return error;
513 }
514
515 /**
516 * gfs2_rindex_hold - Grab a lock on the rindex
517 * @sdp: The GFS2 superblock
518 * @ri_gh: the glock holder
519 *
520 * We grab a lock on the rindex inode to make sure that it doesn't
521 * change whilst we are performing an operation. We keep this lock
522 * for quite long periods of time compared to other locks. This
523 * doesn't matter, since it is shared and it is very, very rarely
524 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
525 *
526 * This makes sure that we're using the latest copy of the resource index
527 * special file, which might have been updated if someone expanded the
528 * filesystem (via gfs2_grow utility), which adds new resource groups.
529 *
530 * Returns: 0 on success, error code otherwise
531 */
532
533 int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
534 {
535 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
536 struct gfs2_glock *gl = ip->i_gl;
537 int error;
538
539 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
540 if (error)
541 return error;
542
543 /* Read new copy from disk if we don't have the latest */
544 if (sdp->sd_rindex_vn != gl->gl_vn) {
545 mutex_lock(&sdp->sd_rindex_mutex);
546 if (sdp->sd_rindex_vn != gl->gl_vn) {
547 error = gfs2_ri_update(ip);
548 if (error)
549 gfs2_glock_dq_uninit(ri_gh);
550 }
551 mutex_unlock(&sdp->sd_rindex_mutex);
552 }
553
554 return error;
555 }
556
557 /**
558 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
559 * @rgd: the struct gfs2_rgrpd describing the RG to read in
560 *
561 * Read in all of a Resource Group's header and bitmap blocks.
562  * Caller must eventually call gfs2_rgrp_bh_put() to release the bitmaps.
563 *
564 * Returns: errno
565 */
566
567 int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
568 {
569 struct gfs2_sbd *sdp = rgd->rd_sbd;
570 struct gfs2_glock *gl = rgd->rd_gl;
571 unsigned int length = rgd->rd_ri.ri_length;
572 struct gfs2_bitmap *bi;
573 unsigned int x, y;
574 int error;
575
576 mutex_lock(&rgd->rd_mutex);
577
578 spin_lock(&sdp->sd_rindex_spin);
579 if (rgd->rd_bh_count) {
580 rgd->rd_bh_count++;
581 spin_unlock(&sdp->sd_rindex_spin);
582 mutex_unlock(&rgd->rd_mutex);
583 return 0;
584 }
585 spin_unlock(&sdp->sd_rindex_spin);
586
587 for (x = 0; x < length; x++) {
588 bi = rgd->rd_bits + x;
589 error = gfs2_meta_read(gl, rgd->rd_ri.ri_addr + x, DIO_START,
590 &bi->bi_bh);
591 if (error)
592 goto fail;
593 }
594
595 for (y = length; y--;) {
596 bi = rgd->rd_bits + y;
597 error = gfs2_meta_reread(sdp, bi->bi_bh, DIO_WAIT);
598 if (error)
599 goto fail;
600 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
601 GFS2_METATYPE_RG)) {
602 error = -EIO;
603 goto fail;
604 }
605 }
606
607 if (rgd->rd_rg_vn != gl->gl_vn) {
608 gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data);
609 rgd->rd_rg_vn = gl->gl_vn;
610 }
611
612 spin_lock(&sdp->sd_rindex_spin);
613 rgd->rd_free_clone = rgd->rd_rg.rg_free;
614 rgd->rd_bh_count++;
615 spin_unlock(&sdp->sd_rindex_spin);
616
617 mutex_unlock(&rgd->rd_mutex);
618
619 return 0;
620
621 fail:
622 while (x--) {
623 bi = rgd->rd_bits + x;
624 brelse(bi->bi_bh);
625 bi->bi_bh = NULL;
626 gfs2_assert_warn(sdp, !bi->bi_clone);
627 }
628 mutex_unlock(&rgd->rd_mutex);
629
630 return error;
631 }
632
633 void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
634 {
635 struct gfs2_sbd *sdp = rgd->rd_sbd;
636
637 spin_lock(&sdp->sd_rindex_spin);
638 gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
639 rgd->rd_bh_count++;
640 spin_unlock(&sdp->sd_rindex_spin);
641 }
642
643 /**
644 * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
645  * @rgd: the struct gfs2_rgrpd whose bitmap buffers are being released
646 *
647 */
648
649 void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
650 {
651 struct gfs2_sbd *sdp = rgd->rd_sbd;
652 int x, length = rgd->rd_ri.ri_length;
653
654 spin_lock(&sdp->sd_rindex_spin);
655 gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
656 if (--rgd->rd_bh_count) {
657 spin_unlock(&sdp->sd_rindex_spin);
658 return;
659 }
660
661 for (x = 0; x < length; x++) {
662 struct gfs2_bitmap *bi = rgd->rd_bits + x;
663 kfree(bi->bi_clone);
664 bi->bi_clone = NULL;
665 brelse(bi->bi_bh);
666 bi->bi_bh = NULL;
667 }
668
669 spin_unlock(&sdp->sd_rindex_spin);
670 }
671
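/*
 * Editor's note on bi_clone: when blocks are freed (see rgblk_free() below)
 * a copy of the bitmap is taken before the real bitmap is updated, and
 * rgblk_search() allocates from that copy while it exists. Freed blocks
 * therefore still look in-use to the allocator and are not handed out again
 * until this function resyncs the clones with the real bitmaps, which the
 * rgrp log operations appear to arrange once the freeing transaction is
 * safely on disk.
 */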
672 void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
673 {
674 struct gfs2_sbd *sdp = rgd->rd_sbd;
675 unsigned int length = rgd->rd_ri.ri_length;
676 unsigned int x;
677
678 for (x = 0; x < length; x++) {
679 struct gfs2_bitmap *bi = rgd->rd_bits + x;
680 if (!bi->bi_clone)
681 continue;
682 memcpy(bi->bi_clone + bi->bi_offset,
683 bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
684 }
685
686 spin_lock(&sdp->sd_rindex_spin);
687 rgd->rd_free_clone = rgd->rd_rg.rg_free;
688 spin_unlock(&sdp->sd_rindex_spin);
689 }
690
691 /**
692 * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
693 * @ip: the incore GFS2 inode structure
694 *
695 * Returns: the struct gfs2_alloc
696 */
697
698 struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
699 {
700 struct gfs2_alloc *al = &ip->i_alloc;
701
702 /* FIXME: Should assert that the correct locks are held here... */
703 memset(al, 0, sizeof(*al));
704 return al;
705 }
706
707 /**
708 * gfs2_alloc_put - throw away the struct gfs2_alloc for an inode
709 * @ip: the inode
710 *
711 */
712
713 void gfs2_alloc_put(struct gfs2_inode *ip)
714 {
715 return;
716 }
717
718 /**
719 * try_rgrp_fit - See if a given reservation will fit in a given RG
720 * @rgd: the RG data
721 * @al: the struct gfs2_alloc structure describing the reservation
722 *
723  * If there's room for the requested blocks to be allocated from the RG,
724  * this function sets the $al_rgd field in @al to point at the rgrp.
727 *
728 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
729 */
730
731 static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
732 {
733 struct gfs2_sbd *sdp = rgd->rd_sbd;
734 int ret = 0;
735
736 spin_lock(&sdp->sd_rindex_spin);
737 if (rgd->rd_free_clone >= al->al_requested) {
738 al->al_rgd = rgd;
739 ret = 1;
740 }
741 spin_unlock(&sdp->sd_rindex_spin);
742
743 return ret;
744 }
745
746 /**
747 * recent_rgrp_first - get first RG from "recent" list
748 * @sdp: The GFS2 superblock
749 * @rglast: address of the rgrp used last
750 *
751 * Returns: The first rgrp in the recent list
752 */
753
754 static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
755 uint64_t rglast)
756 {
757 struct gfs2_rgrpd *rgd = NULL;
758
759 spin_lock(&sdp->sd_rindex_spin);
760
761 if (list_empty(&sdp->sd_rindex_recent_list))
762 goto out;
763
764 if (!rglast)
765 goto first;
766
767 list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
768 if (rgd->rd_ri.ri_addr == rglast)
769 goto out;
770 }
771
772 first:
773 rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd,
774 rd_recent);
775 out:
776 spin_unlock(&sdp->sd_rindex_spin);
777 return rgd;
778 }
779
780 /**
781 * recent_rgrp_next - get next RG from "recent" list
782 * @cur_rgd: current rgrp
783  * @remove: if set, remove @cur_rgd from the recent list as we pass it
784 *
785 * Returns: The next rgrp in the recent list
786 */
787
788 static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
789 int remove)
790 {
791 struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
792 struct list_head *head;
793 struct gfs2_rgrpd *rgd;
794
795 spin_lock(&sdp->sd_rindex_spin);
796
797 head = &sdp->sd_rindex_recent_list;
798
799 list_for_each_entry(rgd, head, rd_recent) {
800 if (rgd == cur_rgd) {
801 if (cur_rgd->rd_recent.next != head)
802 rgd = list_entry(cur_rgd->rd_recent.next,
803 struct gfs2_rgrpd, rd_recent);
804 else
805 rgd = NULL;
806
807 if (remove)
808 list_del(&cur_rgd->rd_recent);
809
810 goto out;
811 }
812 }
813
814 rgd = NULL;
815 if (!list_empty(head))
816 rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
817
818 out:
819 spin_unlock(&sdp->sd_rindex_spin);
820 return rgd;
821 }
822
823 /**
824 * recent_rgrp_add - add an RG to tail of "recent" list
825 * @new_rgd: The rgrp to add
826 *
827 */
828
829 static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
830 {
831 struct gfs2_sbd *sdp = new_rgd->rd_sbd;
832 struct gfs2_rgrpd *rgd;
833 unsigned int count = 0;
834 unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);
835
836 spin_lock(&sdp->sd_rindex_spin);
837
838 list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
839 if (rgd == new_rgd)
840 goto out;
841
842 if (++count >= max)
843 goto out;
844 }
845 list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);
846
847 out:
848 spin_unlock(&sdp->sd_rindex_spin);
849 }
850
851 /**
852 * forward_rgrp_get - get an rgrp to try next from full list
853 * @sdp: The GFS2 superblock
854 *
855 * Returns: The rgrp to try next
856 */
857
858 static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
859 {
860 struct gfs2_rgrpd *rgd;
861 unsigned int journals = gfs2_jindex_size(sdp);
862 unsigned int rg = 0, x;
863
864 spin_lock(&sdp->sd_rindex_spin);
865
866 rgd = sdp->sd_rindex_forward;
867 if (!rgd) {
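		/*
		 * Editor's note: each journal starts its forward scan at a
		 * different offset into the rgrp list (jd_jid / journals of
		 * the way through), so allocators on different nodes tend to
		 * work in different parts of the filesystem and contend less.
		 */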
868 if (sdp->sd_rgrps >= journals)
869 rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;
870
871 for (x = 0, rgd = gfs2_rgrpd_get_first(sdp);
872 x < rg;
873 x++, rgd = gfs2_rgrpd_get_next(rgd))
874 /* Do Nothing */;
875
876 sdp->sd_rindex_forward = rgd;
877 }
878
879 spin_unlock(&sdp->sd_rindex_spin);
880
881 return rgd;
882 }
883
884 /**
885 * forward_rgrp_set - set the forward rgrp pointer
886 * @sdp: the filesystem
887 * @rgd: The new forward rgrp
888 *
889 */
890
891 static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
892 {
893 spin_lock(&sdp->sd_rindex_spin);
894 sdp->sd_rindex_forward = rgd;
895 spin_unlock(&sdp->sd_rindex_spin);
896 }
897
898 /**
899 * get_local_rgrp - Choose and lock a rgrp for allocation
900 * @ip: the inode to reserve space for
901  *
902  * Try to acquire an rgrp in a way which avoids contending with others;
903  * the chosen rgrp and its glock holder end up in @ip's struct gfs2_alloc.
904 *
905 * Returns: errno
906 */
907
908 static int get_local_rgrp(struct gfs2_inode *ip)
909 {
910 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
911 struct gfs2_rgrpd *rgd, *begin = NULL;
912 struct gfs2_alloc *al = &ip->i_alloc;
913 int flags = LM_FLAG_TRY;
914 int skipped = 0;
915 int loops = 0;
916 int error;
917
918 /* Try recently successful rgrps */
919
920 rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc);
921
922 while (rgd) {
923 error = gfs2_glock_nq_init(rgd->rd_gl,
924 LM_ST_EXCLUSIVE, LM_FLAG_TRY,
925 &al->al_rgd_gh);
926 switch (error) {
927 case 0:
928 if (try_rgrp_fit(rgd, al))
929 goto out;
930 gfs2_glock_dq_uninit(&al->al_rgd_gh);
931 rgd = recent_rgrp_next(rgd, 1);
932 break;
933
934 case GLR_TRYFAILED:
935 rgd = recent_rgrp_next(rgd, 0);
936 break;
937
938 default:
939 return error;
940 }
941 }
942
943 /* Go through full list of rgrps */
944
945 begin = rgd = forward_rgrp_get(sdp);
946
947 for (;;) {
948 error = gfs2_glock_nq_init(rgd->rd_gl,
949 LM_ST_EXCLUSIVE, flags,
950 &al->al_rgd_gh);
951 switch (error) {
952 case 0:
953 if (try_rgrp_fit(rgd, al))
954 goto out;
955 gfs2_glock_dq_uninit(&al->al_rgd_gh);
956 break;
957
958 case GLR_TRYFAILED:
959 skipped++;
960 break;
961
962 default:
963 return error;
964 }
965
966 rgd = gfs2_rgrpd_get_next(rgd);
967 if (!rgd)
968 rgd = gfs2_rgrpd_get_first(sdp);
969
970 if (rgd == begin) {
971 if (++loops >= 2 || !skipped)
972 return -ENOSPC;
973 flags = 0;
974 }
975 }
976
977 out:
978 ip->i_last_rg_alloc = rgd->rd_ri.ri_addr;
979
980 if (begin) {
981 recent_rgrp_add(rgd);
982 rgd = gfs2_rgrpd_get_next(rgd);
983 if (!rgd)
984 rgd = gfs2_rgrpd_get_first(sdp);
985 forward_rgrp_set(sdp, rgd);
986 }
987
988 return 0;
989 }
990
991 /**
992 * gfs2_inplace_reserve_i - Reserve space in the filesystem
993 * @ip: the inode to reserve space for
994 *
995 * Returns: errno
996 */
997
998 int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
999 {
1000 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1001 struct gfs2_alloc *al = &ip->i_alloc;
1002 int error;
1003
1004 if (gfs2_assert_warn(sdp, al->al_requested))
1005 return -EINVAL;
1006
1007 error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
1008 if (error)
1009 return error;
1010
1011 error = get_local_rgrp(ip);
1012 if (error) {
1013 gfs2_glock_dq_uninit(&al->al_ri_gh);
1014 return error;
1015 }
1016
1017 al->al_file = file;
1018 al->al_line = line;
1019
1020 return 0;
1021 }
1022
1023 /**
1024 * gfs2_inplace_release - release an inplace reservation
1025 * @ip: the inode the reservation was taken out on
1026 *
1027 * Release a reservation made by gfs2_inplace_reserve().
1028 */
1029
1030 void gfs2_inplace_release(struct gfs2_inode *ip)
1031 {
1032 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1033 struct gfs2_alloc *al = &ip->i_alloc;
1034
1035 if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
1036 fs_warn(sdp, "al_alloced = %u, al_requested = %u "
1037 "al_file = %s, al_line = %u\n",
1038 al->al_alloced, al->al_requested, al->al_file,
1039 al->al_line);
1040
1041 al->al_rgd = NULL;
1042 gfs2_glock_dq_uninit(&al->al_rgd_gh);
1043 gfs2_glock_dq_uninit(&al->al_ri_gh);
1044 }
1045
1046 /**
1047  * gfs2_get_block_type - Look up the allocation state of a block in an RG
1048 * @rgd: the resource group holding the block
1049 * @block: the block number
1050 *
1051 * Returns: The block type (GFS2_BLKST_*)
1052 */
1053
1054 unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, uint64_t block)
1055 {
1056 struct gfs2_bitmap *bi = NULL;
1057 uint32_t length, rgrp_block, buf_block;
1058 unsigned int buf;
1059 unsigned char type;
1060
1061 length = rgd->rd_ri.ri_length;
1062 rgrp_block = block - rgd->rd_ri.ri_data0;
1063
1064 for (buf = 0; buf < length; buf++) {
1065 bi = rgd->rd_bits + buf;
1066 if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
1067 break;
1068 }
1069
1070 gfs2_assert(rgd->rd_sbd, buf < length);
1071 buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;
1072
1073 type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
1074 bi->bi_len, buf_block);
1075
1076 return type;
1077 }
1078
1079 /**
1080 * rgblk_search - find a block in @old_state, change allocation
1081 * state to @new_state
1082 * @rgd: the resource group descriptor
1083 * @goal: the goal block within the RG (start here to search for avail block)
1084 * @old_state: GFS2_BLKST_XXX the before-allocation state to find
1085 * @new_state: GFS2_BLKST_XXX the after-allocation block state
1086 *
1087 * Walk rgrp's bitmap to find bits that represent a block in @old_state.
1088 * Add the found bitmap buffer to the transaction.
1089 * Set the found bits to @new_state to change block's allocation state.
1090 *
1091 * This function never fails, because we wouldn't call it unless we
1092 * know (from reservation results, etc.) that a block is available.
1093 *
1094 * Scope of @goal and returned block is just within rgrp, not the whole
1095 * filesystem.
1096 *
1097 * Returns: the block number allocated
1098 */
1099
1100 static uint32_t rgblk_search(struct gfs2_rgrpd *rgd, uint32_t goal,
1101 unsigned char old_state, unsigned char new_state)
1102 {
1103 struct gfs2_bitmap *bi = NULL;
1104 uint32_t length = rgd->rd_ri.ri_length;
1105 uint32_t blk = 0;
1106 unsigned int buf, x;
1107
1108 /* Find bitmap block that contains bits for goal block */
1109 for (buf = 0; buf < length; buf++) {
1110 bi = rgd->rd_bits + buf;
1111 if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
1112 break;
1113 }
1114
1115 gfs2_assert(rgd->rd_sbd, buf < length);
1116
1117 /* Convert scope of "goal" from rgrp-wide to within found bit block */
1118 goal -= bi->bi_start * GFS2_NBBY;
1119
1120 /* Search (up to entire) bitmap in this rgrp for allocatable block.
1121 "x <= length", instead of "x < length", because we typically start
1122 the search in the middle of a bit block, but if we can't find an
1123    allocatable block anywhere else, we want to be able to wrap around and
1124 search in the first part of our first-searched bit block. */
1125 for (x = 0; x <= length; x++) {
1126 if (bi->bi_clone)
1127 blk = gfs2_bitfit(rgd, bi->bi_clone + bi->bi_offset,
1128 bi->bi_len, goal, old_state);
1129 else
1130 blk = gfs2_bitfit(rgd,
1131 bi->bi_bh->b_data + bi->bi_offset,
1132 bi->bi_len, goal, old_state);
1133 if (blk != BFITNOENT)
1134 break;
1135
1136 /* Try next bitmap block (wrap back to rgrp header if at end) */
1137 buf = (buf + 1) % length;
1138 bi = rgd->rd_bits + buf;
1139 goal = 0;
1140 }
1141
1142 if (gfs2_assert_withdraw(rgd->rd_sbd, x <= length))
1143 blk = 0;
1144
1145 gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
1146 gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
1147 bi->bi_len, blk, new_state);
1148 if (bi->bi_clone)
1149 gfs2_setbit(rgd, bi->bi_clone + bi->bi_offset,
1150 bi->bi_len, blk, new_state);
1151
1152 return bi->bi_start * GFS2_NBBY + blk;
1153 }
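
/*
 * Editor's sketch (compiled out): the value rgblk_search() returns is
 * relative to the rgrp, so callers such as gfs2_alloc_data() below turn it
 * into an absolute disk address by adding the rgrp's first data block.
 * example_alloc_one() is a hypothetical helper.
 */
#if 0
static uint64_t example_alloc_one(struct gfs2_rgrpd *rgd, uint32_t goal)
{
	uint32_t blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);

	return rgd->rd_ri.ri_data0 + blk;	/* filesystem-wide block number */
}
#endif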
1154
1155 /**
1156 * rgblk_free - Change alloc state of given block(s)
1157 * @sdp: the filesystem
1158 * @bstart: the start of a run of blocks to free
1159 * @blen: the length of the block run (all must lie within ONE RG!)
1160 * @new_state: GFS2_BLKST_XXX the after-allocation block state
1161 *
1162 * Returns: Resource group containing the block(s)
1163 */
1164
1165 static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, uint64_t bstart,
1166 uint32_t blen, unsigned char new_state)
1167 {
1168 struct gfs2_rgrpd *rgd;
1169 struct gfs2_bitmap *bi = NULL;
1170 uint32_t length, rgrp_blk, buf_blk;
1171 unsigned int buf;
1172
1173 rgd = gfs2_blk2rgrpd(sdp, bstart);
1174 if (!rgd) {
1175 if (gfs2_consist(sdp))
1176 fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
1177 return NULL;
1178 }
1179
1180 length = rgd->rd_ri.ri_length;
1181
1182 rgrp_blk = bstart - rgd->rd_ri.ri_data0;
1183
1184 while (blen--) {
1185 for (buf = 0; buf < length; buf++) {
1186 bi = rgd->rd_bits + buf;
1187 if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
1188 break;
1189 }
1190
1191 gfs2_assert(rgd->rd_sbd, buf < length);
1192
1193 buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
1194 rgrp_blk++;
1195
1196 if (!bi->bi_clone) {
1197 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
1198 GFP_KERNEL | __GFP_NOFAIL);
1199 memcpy(bi->bi_clone + bi->bi_offset,
1200 bi->bi_bh->b_data + bi->bi_offset,
1201 bi->bi_len);
1202 }
1203 gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
1204 gfs2_setbit(rgd,
1205 bi->bi_bh->b_data + bi->bi_offset,
1206 bi->bi_len, buf_blk, new_state);
1207 }
1208
1209 return rgd;
1210 }
1211
1212 /**
1213 * gfs2_alloc_data - Allocate a data block
1214 * @ip: the inode to allocate the data block for
1215 *
1216 * Returns: the allocated block
1217 */
1218
1219 uint64_t gfs2_alloc_data(struct gfs2_inode *ip)
1220 {
1221 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1222 struct gfs2_alloc *al = &ip->i_alloc;
1223 struct gfs2_rgrpd *rgd = al->al_rgd;
1224 uint32_t goal, blk;
1225 uint64_t block;
1226
1227 if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_data))
1228 goal = ip->i_di.di_goal_data - rgd->rd_ri.ri_data0;
1229 else
1230 goal = rgd->rd_last_alloc_data;
1231
1232 blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
1233 rgd->rd_last_alloc_data = blk;
1234
1235 block = rgd->rd_ri.ri_data0 + blk;
1236 ip->i_di.di_goal_data = block;
1237
1238 gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
1239 rgd->rd_rg.rg_free--;
1240
1241 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1242 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1243
1244 al->al_alloced++;
1245
1246 gfs2_statfs_change(sdp, 0, -1, 0);
1247 gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);
1248
1249 spin_lock(&sdp->sd_rindex_spin);
1250 rgd->rd_free_clone--;
1251 spin_unlock(&sdp->sd_rindex_spin);
1252
1253 return block;
1254 }
1255
1256 /**
1257 * gfs2_alloc_meta - Allocate a metadata block
1258 * @ip: the inode to allocate the metadata block for
1259 *
1260 * Returns: the allocated block
1261 */
1262
1263 uint64_t gfs2_alloc_meta(struct gfs2_inode *ip)
1264 {
1265 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1266 struct gfs2_alloc *al = &ip->i_alloc;
1267 struct gfs2_rgrpd *rgd = al->al_rgd;
1268 uint32_t goal, blk;
1269 uint64_t block;
1270
1271 if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_meta))
1272 goal = ip->i_di.di_goal_meta - rgd->rd_ri.ri_data0;
1273 else
1274 goal = rgd->rd_last_alloc_meta;
1275
1276 blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
1277 rgd->rd_last_alloc_meta = blk;
1278
1279 block = rgd->rd_ri.ri_data0 + blk;
1280 ip->i_di.di_goal_meta = block;
1281
1282 gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
1283 rgd->rd_rg.rg_free--;
1284
1285 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1286 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1287
1288 al->al_alloced++;
1289
1290 gfs2_statfs_change(sdp, 0, -1, 0);
1291 gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);
1292 gfs2_trans_add_unrevoke(sdp, block);
1293
1294 spin_lock(&sdp->sd_rindex_spin);
1295 rgd->rd_free_clone--;
1296 spin_unlock(&sdp->sd_rindex_spin);
1297
1298 return block;
1299 }
1300
1301 /**
1302 * gfs2_alloc_di - Allocate a dinode
1303 * @dip: the directory that the inode is going in
1304 *
1305 * Returns: the block allocated
1306 */
1307
1308 uint64_t gfs2_alloc_di(struct gfs2_inode *dip)
1309 {
1310 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
1311 struct gfs2_alloc *al = &dip->i_alloc;
1312 struct gfs2_rgrpd *rgd = al->al_rgd;
1313 uint32_t blk;
1314 uint64_t block;
1315
1316 blk = rgblk_search(rgd, rgd->rd_last_alloc_meta,
1317 GFS2_BLKST_FREE, GFS2_BLKST_DINODE);
1318
1319 rgd->rd_last_alloc_meta = blk;
1320
1321 block = rgd->rd_ri.ri_data0 + blk;
1322
1323 gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
1324 rgd->rd_rg.rg_free--;
1325 rgd->rd_rg.rg_dinodes++;
1326
1327 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1328 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1329
1330 al->al_alloced++;
1331
1332 gfs2_statfs_change(sdp, 0, -1, +1);
1333 gfs2_trans_add_unrevoke(sdp, block);
1334
1335 spin_lock(&sdp->sd_rindex_spin);
1336 rgd->rd_free_clone--;
1337 spin_unlock(&sdp->sd_rindex_spin);
1338
1339 return block;
1340 }
1341
1342 /**
1343 * gfs2_free_data - free a contiguous run of data block(s)
1344 * @ip: the inode these blocks are being freed from
1345 * @bstart: first block of a run of contiguous blocks
1346 * @blen: the length of the block run
1347 *
1348 */
1349
1350 void gfs2_free_data(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
1351 {
1352 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1353 struct gfs2_rgrpd *rgd;
1354
1355 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
1356 if (!rgd)
1357 return;
1358
1359 rgd->rd_rg.rg_free += blen;
1360
1361 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1362 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1363
1364 gfs2_trans_add_rg(rgd);
1365
1366 gfs2_statfs_change(sdp, 0, +blen, 0);
1367 gfs2_quota_change(ip, -(int64_t)blen,
1368 ip->i_di.di_uid, ip->i_di.di_gid);
1369 }
1370
1371 /**
1372  * gfs2_free_meta - free a contiguous run of metadata block(s)
1373 * @ip: the inode these blocks are being freed from
1374 * @bstart: first block of a run of contiguous blocks
1375 * @blen: the length of the block run
1376 *
1377 */
1378
1379 void gfs2_free_meta(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
1380 {
1381 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1382 struct gfs2_rgrpd *rgd;
1383
1384 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
1385 if (!rgd)
1386 return;
1387
1388 rgd->rd_rg.rg_free += blen;
1389
1390 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1391 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1392
1393 gfs2_trans_add_rg(rgd);
1394
1395 gfs2_statfs_change(sdp, 0, +blen, 0);
1396 gfs2_quota_change(ip, -(int64_t)blen, ip->i_di.di_uid, ip->i_di.di_gid);
1397 gfs2_meta_wipe(ip, bstart, blen);
1398 }
1399
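
/**
 * gfs2_unlink_di - Mark an inode's dinode block as unlinked in the bitmap
 * @inode: the inode whose dinode block is being unlinked
 *
 * (Editor's comment, added for clarity.) Flips the block's bitmap state to
 * GFS2_BLKST_UNLINKED and adds the rgrp header to the current transaction,
 * so that the dinode can be found and deallocated later.
 */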
1400 void gfs2_unlink_di(struct inode *inode)
1401 {
1402 struct gfs2_inode *ip = GFS2_I(inode);
1403 struct gfs2_sbd *sdp = GFS2_SB(inode);
1404 struct gfs2_rgrpd *rgd;
1405 u64 blkno = ip->i_num.no_addr;
1406
1407 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
1408 if (!rgd)
1409 return;
1410 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1411 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1412 gfs2_trans_add_rg(rgd);
1413 }
1414
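/**
 * gfs2_free_uninit_di - Return a dinode block to the free pool
 * @rgd: the resource group expected to contain the dinode
 * @blkno: the dinode's block number
 *
 * (Editor's comment, added for clarity.) Sets the block back to
 * GFS2_BLKST_FREE and fixes up the rgrp's dinode/free counts and the statfs
 * figures; gfs2_free_di() below additionally handles quota and wipes the
 * in-core metadata.
 */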
1415 void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, uint64_t blkno)
1416 {
1417 struct gfs2_sbd *sdp = rgd->rd_sbd;
1418 struct gfs2_rgrpd *tmp_rgd;
1419
1420 tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
1421 if (!tmp_rgd)
1422 return;
1423 gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
1424
1425 if (!rgd->rd_rg.rg_dinodes)
1426 gfs2_consist_rgrpd(rgd);
1427 rgd->rd_rg.rg_dinodes--;
1428 rgd->rd_rg.rg_free++;
1429
1430 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1431 gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1432
1433 gfs2_statfs_change(sdp, 0, +1, -1);
1434 gfs2_trans_add_rg(rgd);
1435 }
1436
1437
1438 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
1439 {
1440 gfs2_free_uninit_di(rgd, ip->i_num.no_addr);
1441 gfs2_quota_change(ip, -1, ip->i_di.di_uid, ip->i_di.di_gid);
1442 gfs2_meta_wipe(ip, ip->i_num.no_addr, 1);
1443 }
1444
1445 /**
1446 * gfs2_rlist_add - add a RG to a list of RGs
1447 * @sdp: the filesystem
1448 * @rlist: the list of resource groups
1449 * @block: the block
1450 *
1451 * Figure out what RG a block belongs to and add that RG to the list
1452 *
1453 * FIXME: Don't use NOFAIL
1454 *
1455 */
1456
1457 void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
1458 uint64_t block)
1459 {
1460 struct gfs2_rgrpd *rgd;
1461 struct gfs2_rgrpd **tmp;
1462 unsigned int new_space;
1463 unsigned int x;
1464
1465 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
1466 return;
1467
1468 rgd = gfs2_blk2rgrpd(sdp, block);
1469 if (!rgd) {
1470 if (gfs2_consist(sdp))
1471 fs_err(sdp, "block = %llu\n", (unsigned long long)block);
1472 return;
1473 }
1474
1475 for (x = 0; x < rlist->rl_rgrps; x++)
1476 if (rlist->rl_rgd[x] == rgd)
1477 return;
1478
1479 if (rlist->rl_rgrps == rlist->rl_space) {
1480 new_space = rlist->rl_space + 10;
1481
1482 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
1483 GFP_KERNEL | __GFP_NOFAIL);
1484
1485 if (rlist->rl_rgd) {
1486 memcpy(tmp, rlist->rl_rgd,
1487 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
1488 kfree(rlist->rl_rgd);
1489 }
1490
1491 rlist->rl_space = new_space;
1492 rlist->rl_rgd = tmp;
1493 }
1494
1495 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
1496 }
1497
1498 /**
1499 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
1500 * and initialize an array of glock holders for them
1501 * @rlist: the list of resource groups
1502 * @state: the lock state to acquire the RG lock in
1503 * @flags: the modifier flags for the holder structures
1504 *
1505 * FIXME: Don't use NOFAIL
1506 *
1507 */
1508
1509 void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
1510 int flags)
1511 {
1512 unsigned int x;
1513
1514 rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
1515 GFP_KERNEL | __GFP_NOFAIL);
1516 for (x = 0; x < rlist->rl_rgrps; x++)
1517 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
1518 state, flags,
1519 &rlist->rl_ghs[x]);
1520 }
1521
1522 /**
1523 * gfs2_rlist_free - free a resource group list
1524 * @list: the list of resource groups
1525 *
1526 */
1527
1528 void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
1529 {
1530 unsigned int x;
1531
1532 kfree(rlist->rl_rgd);
1533
1534 if (rlist->rl_ghs) {
1535 for (x = 0; x < rlist->rl_rgrps; x++)
1536 gfs2_holder_uninit(&rlist->rl_ghs[x]);
1537 kfree(rlist->rl_ghs);
1538 }
1539 }
1540