[GFS2] Missed deletion of debugging code
fs/gfs2/lops.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"

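/**
 * glock_lo_add - Add a glock to the transaction
 *
 * Takes a reference on the glock, marks it dirty and links it onto the
 * log's glock list. The glock must be held in exclusive mode.
 */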
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_glock *gl;

	get_transaction->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	gl = container_of(le, struct gfs2_glock, gl_le);
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
		return;
	gfs2_glock_hold(gl);
	set_bit(GLF_DIRTY, &gl->gl_flags);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_gl++;
	list_add(&le->le_list, &sdp->sd_log_le_gl);
	gfs2_log_unlock(sdp);
}

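/**
 * glock_lo_after_commit - Drop references on committed glocks
 *
 * Empties the log's glock list once the transaction has been committed,
 * releasing the reference taken in glock_lo_add().
 */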
static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_gl;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
		list_del_init(&gl->gl_le.le_list);
		sdp->sd_log_num_gl--;

		gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
		gfs2_glock_put(gl);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}

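/**
 * buf_lo_add - Add a metadata buffer to the transaction
 *
 * Pins the buffer in memory and links it onto both the transaction's
 * buffer list and the log's metadata buffer list.
 */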
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	if (!list_empty(&bd->bd_list_tr))
		return;

	tr = get_transaction;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);

	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);

	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	gfs2_log_unlock(sdp);

	tr->tr_num_buf_new++;
}

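/**
 * buf_lo_incore_commit - Drain the transaction's buffer list at commit
 *
 * The buffers themselves remain on the log's metadata list; only the
 * per-transaction list is emptied here.
 */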
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

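/**
 * buf_lo_before_commit - Write the metadata buffers into the log
 *
 * Each chunk of up to "limit" buffers is preceded by a log descriptor
 * block listing the real (in-place) block number of every buffer that
 * follows it.
 */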
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total = sdp->sd_log_num_buf;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	unsigned int limit;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	offset += (sizeof(__be64) - 1);
	offset &= ~(sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
	/* for 4k blocks, limit = 503 */

	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		bh = gfs2_log_get_buf(sdp);
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		ptr = (__be64 *)(bh->b_data + offset);
		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
		ld->ld_header.mh_type = cpu_to_be16(GFS2_METATYPE_LD);
		ld->ld_header.mh_format = cpu_to_be16(GFS2_FORMAT_LD);
		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);
		ld->ld_data2 = cpu_to_be32(0);
		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		set_buffer_dirty(bh);
		ll_rw_block(WRITE, 1, &bh);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			if (++n >= num)
				break;
		}

		total -= num;
	}
}

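/**
 * buf_lo_after_commit - Unpin the metadata buffers
 *
 * Once the log write has completed, each buffer is unpinned and placed
 * on the AIL so that it can later be written back in place.
 */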
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

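/**
 * buf_lo_before_scan - Reset the replay counters before recovery
 */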
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

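/**
 * buf_lo_scan_elements - Replay metadata blocks found in the journal
 *
 * On the second recovery pass, each block listed in a metadata log
 * descriptor is copied from the journal back to its real location,
 * unless a matching revoke is found.
 */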
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	struct gfs2_glock *gl = get_v2ip(jd->jd_inode)->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

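/**
 * buf_lo_after_scan - Sync the replayed blocks and report the result
 */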
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl,
			       DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

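/**
 * revoke_lo_add - Add a revoke to the log
 *
 * A revoke records that a block no longer holds live metadata, so any
 * earlier journal copies of it must not be replayed during recovery.
 */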
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_trans *tr;

	tr = get_transaction;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;

	gfs2_log_lock(sdp);
	sdp->sd_log_num_revoke++;
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
	gfs2_log_unlock(sdp);
}

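/**
 * revoke_lo_before_commit - Write the revoke tags into the log
 *
 * The revokes are packed as __be64 block numbers following a single log
 * descriptor, spilling into continuation (LB) blocks as required.
 */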
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_revoke *rv;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_log_get_buf(sdp);
	ld = (struct gfs2_log_descriptor *)bh->b_data;
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be16(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be16(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(uint64_t)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	ld->ld_data2 = cpu_to_be32(0);
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	offset = sizeof(struct gfs2_log_descriptor);

	while (!list_empty(head)) {
		rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
		list_del_init(&rv->rv_le.le_list);
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(uint64_t) > sdp->sd_sb.sb_bsize) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be16(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be16(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
		kfree(rv);

		offset += sizeof(uint64_t);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	set_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
}

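/**
 * revoke_lo_before_scan - Reset revoke recovery state
 */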
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

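/**
 * revoke_lo_scan_elements - Collect revoke tags during recovery
 *
 * On the first pass, every revoke in the descriptor is added to the
 * revoke table so that later metadata replay can be filtered against it.
 */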
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	uint64_t blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(uint64_t) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0)
				return error;
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(uint64_t);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

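/**
 * revoke_lo_after_scan - Report the revokes found and clean up
 */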
static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

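/**
 * rg_lo_add - Add a resource group to the log
 *
 * Takes a hold on the rgrp's bitmap buffers until after the commit.
 */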
static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;

	get_transaction->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);
	gfs2_rgrp_bh_hold(rgd);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}

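/**
 * rg_lo_after_commit - Release the resource groups after commit
 *
 * Refreshes the clone bitmaps and drops the hold taken in rg_lo_add().
 */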
static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = get_transaction;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = get_v2ip(mapping->host);

	tr->tr_touched = 1;
	if (list_empty(&bd->bd_list_tr) &&
	    (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
		tr->tr_num_buf++;
		gfs2_trans_add_gl(bd->bd_gl);
		list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		gfs2_pin(sdp, bd->bd_bh);
	}
	gfs2_log_lock(sdp);
	if (list_empty(&le->le_list)) {
		if (ip->i_di.di_flags & GFS2_DIF_JDATA)
			sdp->sd_log_num_jdata++;
		sdp->sd_log_num_databuf++;
		list_add(&le->le_list, &sdp->sd_log_le_databuf);
	}
	gfs2_log_unlock(sdp);
}

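/**
 * gfs2_check_magic - Check whether a data block begins with GFS2_MAGIC
 *
 * A data block that happens to start with the magic number could be
 * mistaken for metadata during replay, so it must be escaped before
 * being written to the log.
 */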
static int gfs2_check_magic(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	void *kaddr;
	__be32 *ptr;
	int rv = 0;

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		rv = 1;
	kunmap_atomic(kaddr, KM_USER0);

	return rv;
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that's been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	LIST_HEAD(started);
	struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
	struct buffer_head *bh = NULL;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	struct gfs2_log_descriptor *ld;
	unsigned int limit;
	unsigned int total_dbuf = sdp->sd_log_num_databuf;
	unsigned int total_jdata = sdp->sd_log_num_jdata;
	unsigned int num, n;
	__be64 *ptr = NULL;

	offset += (2*sizeof(__be64) - 1);
	offset &= ~(2*sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);

	/*
	 * Start writing ordered buffers, write journaled buffers
	 * into the log along with a header
	 */
	gfs2_log_lock(sdp);
	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
				       bd_le.le_list);
	while (total_dbuf) {
		num = total_jdata;
		if (num > limit)
			num = limit;
		n = 0;
		list_for_each_entry_safe_continue(bd1, bdt,
						  &sdp->sd_log_le_databuf,
						  bd_le.le_list) {
			/* An ordered write buffer */
			if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
				list_move(&bd1->bd_le.le_list, &started);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				total_dbuf--;
				if (bd1->bd_bh) {
					get_bh(bd1->bd_bh);
					if (buffer_dirty(bd1->bd_bh)) {
						gfs2_log_unlock(sdp);
						wait_on_buffer(bd1->bd_bh);
						ll_rw_block(WRITE, 1,
							    &bd1->bd_bh);
						gfs2_log_lock(sdp);
					}
					brelse(bd1->bd_bh);
					continue;
				}
				continue;
			} else if (bd1->bd_bh) { /* A journaled buffer */
				int magic;
				gfs2_log_unlock(sdp);
				if (!bh) {
					bh = gfs2_log_get_buf(sdp);
					ld = (struct gfs2_log_descriptor *)
								bh->b_data;
					ptr = (__be64 *)(bh->b_data + offset);
					ld->ld_header.mh_magic =
						cpu_to_be32(GFS2_MAGIC);
					ld->ld_header.mh_type =
						cpu_to_be16(GFS2_METATYPE_LD);
					ld->ld_header.mh_format =
						cpu_to_be16(GFS2_FORMAT_LD);
					ld->ld_type =
						cpu_to_be32(GFS2_LOG_DESC_JDATA);
					ld->ld_length = cpu_to_be32(num + 1);
					ld->ld_data1 = cpu_to_be32(num);
					ld->ld_data2 = cpu_to_be32(0);
					memset(ld->ld_reserved, 0,
					       sizeof(ld->ld_reserved));
				}
				magic = gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
				*ptr++ = cpu_to_be64((__u64)magic);
				clear_buffer_escaped(bd1->bd_bh);
				if (unlikely(magic != 0))
					set_buffer_escaped(bd1->bd_bh);
				gfs2_log_lock(sdp);
				if (n++ > num)
					break;
			}
		}
		gfs2_log_unlock(sdp);
		if (bh) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			bh = NULL;
		}
		n = 0;
		gfs2_log_lock(sdp);
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
					     bd_le.le_list) {
			if (!bd2->bd_bh)
				continue;
			/* copy buffer if it needs escaping */
			gfs2_log_unlock(sdp);
			if (unlikely(buffer_escaped(bd2->bd_bh))) {
				void *kaddr;
				struct page *page = bd2->bd_bh->b_page;
				bh = gfs2_log_get_buf(sdp);
				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(bh->b_data,
				       kaddr + bh_offset(bd2->bd_bh),
				       sdp->sd_sb.sb_bsize);
				kunmap_atomic(kaddr, KM_USER0);
				*(__be32 *)bh->b_data = 0;
			} else {
				bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			}
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}
		bh = NULL;
		total_dbuf -= num;
		total_jdata -= num;
	}
	gfs2_log_unlock(sdp);

	/* Wait on all ordered buffers */
	while (!list_empty(&started)) {
		gfs2_log_lock(sdp);
		bd1 = list_entry(started.next, struct gfs2_bufdata,
				 bd_le.le_list);
		list_del(&bd1->bd_le.le_list);
		sdp->sd_log_num_databuf--;

		bh = bd1->bd_bh;
		if (bh) {
			set_v2bd(bh, NULL);
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
		} else
			gfs2_log_unlock(sdp);

		kfree(bd1);
	}

	/* We've removed all the ordered write bufs here, so only jdata left */
	gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}

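/**
 * databuf_lo_scan_elements - Replay journaled data blocks
 *
 * Like buf_lo_scan_elements(), but each journal entry carries a pair of
 * __be64 values: the block number and an escape flag. Escaped blocks
 * have their first word restored to GFS2_MAGIC before being written back.
 */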
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	struct gfs2_glock *gl = get_v2ip(jd->jd_inode)->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	uint64_t esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);
		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

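/**
 * databuf_lo_after_scan - Sync the replayed data blocks and report
 */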
static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl,
			       DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

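/**
 * databuf_lo_after_commit - Unpin the journaled data buffers
 */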
static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		sdp->sd_log_num_jdata--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}


struct gfs2_log_operations gfs2_glock_lops = {
	.lo_add = glock_lo_add,
	.lo_after_commit = glock_lo_after_commit,
	.lo_name = "glock"
};

struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf"
};

struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke"
};

struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
	.lo_name = "rg"
};

struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf"
};

struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_glock_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	&gfs2_rg_lops,
	&gfs2_databuf_lops,
	NULL
};